Use of org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO in project crate by crate, from the class InternalEngineTests, method testLookupSeqNoByIdInLucene:
@Test
public void testLookupSeqNoByIdInLucene() throws Exception {
    int numOps = between(10, 100);
    long seqNo = 0;
    List<Engine.Operation> operations = new ArrayList<>(numOps);
    for (int i = 0; i < numOps; i++) {
        String id = Integer.toString(between(1, 50));
        boolean isIndexing = randomBoolean();
        int copies = frequently() ? 1 : between(2, 4);
        for (int c = 0; c < copies; c++) {
            final ParsedDocument doc = EngineTestCase.createParsedDoc(id, null);
            if (isIndexing) {
                // REPLICA-origin ops carry an explicit seq_no; the trailing UNASSIGNED_SEQ_NO, 0L are the
                // if_seq_no / if_primary_term precondition, i.e. no compare-and-set check is enforced
                operations.add(new Engine.Index(EngineTestCase.newUid(doc), doc, seqNo, primaryTerm.get(), i, null,
                    Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(), -1, true, UNASSIGNED_SEQ_NO, 0L));
            } else {
                operations.add(new Engine.Delete(doc.id(), EngineTestCase.newUid(doc), seqNo, primaryTerm.get(), i, null,
                    Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(), UNASSIGNED_SEQ_NO, 0L));
            }
        }
        seqNo++;
        if (rarely()) {
            seqNo++;
        }
    }
    Randomness.shuffle(operations);
    Settings.Builder settings = Settings.builder()
        .put(defaultSettings.getSettings())
        .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true);
    final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
    final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
    // id -> latest seq_no
    Map<String, Engine.Operation> latestOps = new HashMap<>();
    try (Store store = createStore();
         InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null))) {
        CheckedRunnable<IOException> lookupAndCheck = () -> {
            try (Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL)) {
                Map<String, Long> liveOps = latestOps.entrySet().stream()
                    .filter(e -> e.getValue().operationType() == Engine.Operation.TYPE.INDEX)
                    .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue().seqNo()));
                assertThat(getDocIds(engine, true).stream()
                    .collect(Collectors.toMap(e -> e.getId(), e -> e.getSeqNo())), equalTo(liveOps));
                for (String id : latestOps.keySet()) {
                    String msg = "latestOps=" + latestOps + " op=" + id;
                    DocIdAndSeqNo docIdAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.getIndexReader(), newUid(id));
                    if (liveOps.containsKey(id) == false) {
                        assertNull(msg, docIdAndSeqNo);
                    } else {
                        assertNotNull(msg, docIdAndSeqNo);
                        assertThat(msg, docIdAndSeqNo.seqNo, equalTo(latestOps.get(id).seqNo()));
                    }
                }
                String notFoundId = randomValueOtherThanMany(liveOps::containsKey, () -> Long.toString(randomNonNegativeLong()));
                assertNull(VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.getIndexReader(), newUid(notFoundId)));
            }
        };
        for (Engine.Operation op : operations) {
            if (op instanceof Engine.Index) {
                engine.index((Engine.Index) op);
                if (latestOps.containsKey(op.id()) == false || latestOps.get(op.id()).seqNo() < op.seqNo()) {
                    latestOps.put(op.id(), op);
                }
            } else if (op instanceof Engine.Delete) {
                engine.delete((Engine.Delete) op);
                if (latestOps.containsKey(op.id()) == false || latestOps.get(op.id()).seqNo() < op.seqNo()) {
                    latestOps.put(op.id(), op);
                }
            }
            if (randomInt(100) < 10) {
                engine.refresh("test");
                lookupAndCheck.run();
            }
            if (rarely()) {
                engine.flush(false, true);
                lookupAndCheck.run();
            }
        }
        engine.refresh("test");
        lookupAndCheck.run();
    }
}
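A minimal sketch of the per-id lookup this test exercises, reusing only calls that appear above (the engine, the searcher scope, and the newUid helper are assumed to be available exactly as in the test; the id "42" is just an example value):

    try (Searcher searcher = engine.acquireSearcher("lookup", Engine.SearcherScope.INTERNAL)) {
        // resolves the Lucene document and its seq_no for a given _id; per the assertions above it is
        // null when the id has no live document (e.g. its latest operation was a delete)
        DocIdAndSeqNo docIdAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.getIndexReader(), newUid("42"));
        if (docIdAndSeqNo != null) {
            long latestSeqNo = docIdAndSeqNo.seqNo;   // latest sequence number indexed for this id
        }
    }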
Use of org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO in project crate by crate, from the class InternalEngineTests, method testConcurrentOutOfOrderDocsOnReplica:
@Test
public void testConcurrentOutOfOrderDocsOnReplica() throws IOException, InterruptedException {
    final List<Engine.Operation> opsDoc1 = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 100, 300, "1");
    final Engine.Operation lastOpDoc1 = opsDoc1.get(opsDoc1.size() - 1);
    final String lastFieldValueDoc1;
    if (lastOpDoc1 instanceof Engine.Index) {
        Engine.Index index = (Engine.Index) lastOpDoc1;
        lastFieldValueDoc1 = index.docs().get(0).get("value");
    } else {
        // delete
        lastFieldValueDoc1 = null;
    }
    final List<Engine.Operation> opsDoc2 = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 100, 300, "2");
    final Engine.Operation lastOpDoc2 = opsDoc2.get(opsDoc2.size() - 1);
    final String lastFieldValueDoc2;
    if (lastOpDoc2 instanceof Engine.Index) {
        Engine.Index index = (Engine.Index) lastOpDoc2;
        lastFieldValueDoc2 = index.docs().get(0).get("value");
    } else {
        // delete
        lastFieldValueDoc2 = null;
    }
    // randomly interleave
    final AtomicLong seqNoGenerator = new AtomicLong();
    BiFunction<Engine.Operation, Long, Engine.Operation> seqNoUpdater = (operation, newSeqNo) -> {
        if (operation instanceof Engine.Index) {
            Engine.Index index = (Engine.Index) operation;
            Document doc = testDocumentWithTextField(index.docs().get(0).get("value"));
            ParsedDocument parsedDocument = testParsedDocument(index.id(), index.routing(), doc, index.source(), null);
            return new Engine.Index(index.uid(), parsedDocument, newSeqNo, index.primaryTerm(), index.version(),
                index.versionType(), index.origin(), index.startTime(), index.getAutoGeneratedIdTimestamp(),
                index.isRetry(), UNASSIGNED_SEQ_NO, 0);
        } else {
            Engine.Delete delete = (Engine.Delete) operation;
            return new Engine.Delete(delete.id(), delete.uid(), newSeqNo, delete.primaryTerm(), delete.version(),
                delete.versionType(), delete.origin(), delete.startTime(), UNASSIGNED_SEQ_NO, 0);
        }
    };
    final List<Engine.Operation> allOps = new ArrayList<>();
    Iterator<Engine.Operation> iter1 = opsDoc1.iterator();
    Iterator<Engine.Operation> iter2 = opsDoc2.iterator();
    while (iter1.hasNext() && iter2.hasNext()) {
        final Engine.Operation next = randomBoolean() ? iter1.next() : iter2.next();
        allOps.add(seqNoUpdater.apply(next, seqNoGenerator.getAndIncrement()));
    }
    iter1.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o, seqNoGenerator.getAndIncrement())));
    iter2.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o, seqNoGenerator.getAndIncrement())));
    // insert some duplicates
    randomSubsetOf(allOps).forEach(op -> allOps.add(seqNoUpdater.apply(op, op.seqNo())));
    shuffle(allOps, random());
    concurrentlyApplyOps(allOps, engine);
    engine.refresh("test");
    if (lastFieldValueDoc1 != null) {
        try (Searcher searcher = engine.acquireSearcher("test")) {
            final TotalHitCountCollector collector = new TotalHitCountCollector();
            searcher.search(new TermQuery(new Term("value", lastFieldValueDoc1)), collector);
            assertThat(collector.getTotalHits(), equalTo(1));
        }
    }
    if (lastFieldValueDoc2 != null) {
        try (Searcher searcher = engine.acquireSearcher("test")) {
            final TotalHitCountCollector collector = new TotalHitCountCollector();
            searcher.search(new TermQuery(new Term("value", lastFieldValueDoc2)), collector);
            assertThat(collector.getTotalHits(), equalTo(1));
        }
    }
    int totalExpectedOps = 0;
    if (lastFieldValueDoc1 != null) {
        totalExpectedOps++;
    }
    if (lastFieldValueDoc2 != null) {
        totalExpectedOps++;
    }
    assertVisibleCount(engine, totalExpectedOps);
}
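Because each operation's effect is decided by its seq_no rather than its arrival order, a quick way to spot-check convergence for one of the documents would be the following sketch (not part of the original test; it reuses VersionsAndSeqNoResolver and the newUid helper shown in the previous example):

    try (Searcher searcher = engine.acquireSearcher("verify", Engine.SearcherScope.INTERNAL)) {
        // for doc "1": null if its highest-seq_no operation was a delete, otherwise the resolved
        // seq_no is the highest one applied, regardless of the order the operations arrived in
        DocIdAndSeqNo resolved = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.getIndexReader(), newUid("1"));
    }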
Use of org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO in project crate by crate, from the class InternalEngineTests, method testSeqNoGenerator:
@Test
public void testSeqNoGenerator() throws IOException {
    engine.close();
    final long seqNo = randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Integer.MAX_VALUE);
    final BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier =
        (ms, lcp) -> new LocalCheckpointTracker(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED);
    final AtomicLong seqNoGenerator = new AtomicLong(seqNo);
    try (Engine e = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null,
             localCheckpointTrackerSupplier, null, (engine, operation) -> seqNoGenerator.getAndIncrement())) {
        final String id = "id";
        final Field uidField = new Field("_id", id, IdFieldMapper.Defaults.FIELD_TYPE);
        final Field versionField = new NumericDocValuesField("_version", 0);
        final SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
        final ParseContext.Document document = new ParseContext.Document();
        document.add(uidField);
        document.add(versionField);
        document.add(seqID.seqNo);
        document.add(seqID.seqNoDocValue);
        document.add(seqID.primaryTerm);
        final BytesReference source = new BytesArray(new byte[] { 1 });
        final ParsedDocument parsedDocument = new ParsedDocument(versionField, seqID, id, "routing",
            Collections.singletonList(document), source, null);
        final Engine.Index index = new Engine.Index(new Term("_id", parsedDocument.id()), parsedDocument,
            UNASSIGNED_SEQ_NO, randomIntBetween(1, 8), Versions.NOT_FOUND, VersionType.INTERNAL,
            Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, randomBoolean(), UNASSIGNED_SEQ_NO, 0);
        final Engine.IndexResult indexResult = e.index(index);
        assertThat(indexResult.getSeqNo(), equalTo(seqNo));
        assertThat(seqNoGenerator.get(), equalTo(seqNo + 1));
        final Engine.Delete delete = new Engine.Delete(id, new Term("_id", parsedDocument.id()),
            UNASSIGNED_SEQ_NO, randomIntBetween(1, 8), Versions.MATCH_ANY, VersionType.INTERNAL,
            Engine.Operation.Origin.PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0);
        final Engine.DeleteResult deleteResult = e.delete(delete);
        assertThat(deleteResult.getSeqNo(), equalTo(seqNo + 1));
        assertThat(seqNoGenerator.get(), equalTo(seqNo + 2));
    }
}
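In this example UNASSIGNED_SEQ_NO appears twice in each operation: once as the operation's own seq_no (the op originates on the primary, so the engine assigns the real value, here through the injected seqNoGenerator) and once as the trailing if_seq_no precondition (none is enforced). A trimmed sketch of that flow, reusing the objects created above with fixed values where the test randomizes:

    // seqNo = UNASSIGNED_SEQ_NO: the primary engine chooses the sequence number itself
    Engine.Index index = new Engine.Index(new Term("_id", parsedDocument.id()), parsedDocument,
        UNASSIGNED_SEQ_NO, 1, Versions.NOT_FOUND, VersionType.INTERNAL,
        Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
    long assignedSeqNo = e.index(index).getSeqNo();   // equals the value handed out by seqNoGenerator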
Use of org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO in project crate by crate, from the class PeerRecoveryRetentionLeaseExpiryTests, method setUpReplicationTracker:
@Before
public void setUpReplicationTracker() throws InterruptedException {
    final AllocationId primaryAllocationId = AllocationId.newInitializing();
    currentTimeMillis = new AtomicLong(randomLongBetween(0, 1024));
    if (randomBoolean()) {
        settings = Settings.builder()
            .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
            .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(),
                TimeValue.timeValueMillis(randomLongBetween(1, TimeValue.timeValueHours(12).millis())).getStringRep())
            .build();
    } else {
        settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build();
    }
    // must be set in each test
    safeCommitInfo = null;
    final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE);
    replicationTracker = new ReplicationTracker(
        new ShardId("test", "_na", 0),
        primaryAllocationId.getId(),
        IndexSettingsModule.newIndexSettings("test", settings),
        primaryTerm,
        UNASSIGNED_SEQ_NO,
        value -> {},
        currentTimeMillis::get,
        (leases, listener) -> {},
        () -> safeCommitInfo);
    replicationTracker.updateFromMaster(1L, Collections.singleton(primaryAllocationId.getId()),
        routingTable(Collections.emptySet(), primaryAllocationId));
    replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED);
    final AllocationId replicaAllocationId = AllocationId.newInitializing();
    final IndexShardRoutingTable routingTableWithReplica = routingTable(Collections.singleton(replicaAllocationId), primaryAllocationId);
    replicationTracker.updateFromMaster(2L, Collections.singleton(primaryAllocationId.getId()), routingTableWithReplica);
    replicationTracker.addPeerRecoveryRetentionLease(
        routingTableWithReplica.getByAllocationId(replicaAllocationId.getId()).currentNodeId(),
        randomCheckpoint(), EMPTY_LISTENER);
    replicationTracker.initiateTracking(replicaAllocationId.getId());
    replicationTracker.markAllocationIdAsInSync(replicaAllocationId.getId(), randomCheckpoint());
}
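Here UNASSIGNED_SEQ_NO is the initial global checkpoint handed to the ReplicationTracker: the tracker starts without a known global checkpoint and only establishes one after the primary is activated and in-sync copies report their checkpoints. A sketch of the same constructor call with each argument labelled; the labels are inferred from the call above and from how the Elasticsearch ReplicationTracker is conventionally constructed, so treat them as an assumption rather than as documentation:

    replicationTracker = new ReplicationTracker(
        new ShardId("test", "_na", 0),                              // shard being tracked
        primaryAllocationId.getId(),                                // allocation id of this (primary) copy
        IndexSettingsModule.newIndexSettings("test", settings),     // index settings (soft deletes enabled)
        primaryTerm,
        UNASSIGNED_SEQ_NO,                                          // initial global checkpoint: not yet known
        value -> {},                                                // callback for global checkpoint updates
        currentTimeMillis::get,                                     // clock used for retention lease expiry
        (leases, listener) -> {},                                   // retention lease sync hook
        () -> safeCommitInfo);                                      // safe commit info supplier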
Use of org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO in project crate by crate, from the class PeerRecoveryTargetService, method doRecovery:
private void doRecovery(final long recoveryId) {
    final StartRecoveryRequest request;
    final RecoveryState.Timer timer;
    CancellableThreads cancellableThreads;
    try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
        if (recoveryRef == null) {
            LOGGER.trace("not running recovery with id [{}] - can not find it (probably finished)", recoveryId);
            return;
        }
        final RecoveryTarget recoveryTarget = recoveryRef.target();
        timer = recoveryTarget.state().getTimer();
        cancellableThreads = recoveryTarget.cancellableThreads();
        try {
            assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node";
            LOGGER.trace("{} preparing shard for peer recovery", recoveryTarget.shardId());
            recoveryTarget.indexShard().prepareForIndexRecovery();
            final long startingSeqNo = recoveryTarget.indexShard().recoverLocallyUpToGlobalCheckpoint();
            assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG :
                "unexpected recovery stage [" + recoveryTarget.state().getStage() + "] starting seqno [ " + startingSeqNo + "]";
            request = getStartRecoveryRequest(LOGGER, clusterService.localNode(), recoveryTarget, startingSeqNo);
        } catch (final Exception e) {
            // this will be logged as warning later on...
            LOGGER.trace("unexpected error while preparing shard for peer recovery, failing recovery", e);
            onGoingRecoveries.failRecovery(recoveryId,
                new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), true);
            return;
        }
    }
    Consumer<Exception> handleException = e -> {
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace(() -> new ParameterizedMessage("[{}][{}] Got exception on recovery",
                request.shardId().getIndex().getName(), request.shardId().id()), e);
        }
        Throwable cause = SQLExceptions.unwrap(e);
        if (cause instanceof CancellableThreads.ExecutionCancelledException) {
            // this can also come from the source wrapped in a RemoteTransportException
            onGoingRecoveries.failRecovery(recoveryId,
                new RecoveryFailedException(request, "source has canceled the recovery", cause), false);
            return;
        }
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        // do it twice, in case we have double transport exception
        cause = SQLExceptions.unwrap(cause);
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException || cause instanceof ShardNotFoundException) {
            // if the target is not ready yet, retry
            retryRecovery(recoveryId, "remote shard not ready", recoverySettings.retryDelayStateSync(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof DelayRecoveryException) {
            retryRecovery(recoveryId, cause, recoverySettings.retryDelayStateSync(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof ConnectTransportException) {
            LOGGER.debug("delaying recovery of {} for [{}] due to networking error [{}]",
                request.shardId(), recoverySettings.retryDelayNetwork(), cause.getMessage());
            retryRecovery(recoveryId, cause.getMessage(), recoverySettings.retryDelayNetwork(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof AlreadyClosedException) {
            onGoingRecoveries.failRecovery(recoveryId,
                new RecoveryFailedException(request, "source shard is closed", cause), false);
            return;
        }
        onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, e), true);
    };
    try {
        LOGGER.trace("{} starting recovery from {}", request.shardId(), request.sourceNode());
        cancellableThreads.executeIO(() -> transportService.sendRequest(
            request.sourceNode(), PeerRecoverySourceService.Actions.START_RECOVERY, request,
            new TransportResponseHandler<RecoveryResponse>() {

                @Override
                public void handleResponse(RecoveryResponse recoveryResponse) {
                    final TimeValue recoveryTime = new TimeValue(timer.time());
                    // do this through ongoing recoveries to remove it from the collection
                    onGoingRecoveries.markRecoveryAsDone(recoveryId);
                    if (LOGGER.isTraceEnabled()) {
                        StringBuilder sb = new StringBuilder();
                        sb.append('[').append(request.shardId().getIndex().getName()).append(']')
                          .append('[').append(request.shardId().id()).append("] ");
                        sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(recoveryTime).append("]\n");
                        sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]")
                          .append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
                          .append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [")
                          .append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']').append("\n");
                        sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size())
                          .append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
                        sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
                        sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]")
                          .append(" transaction log operations")
                          .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]").append("\n");
                        LOGGER.trace("{}", sb);
                    } else {
                        LOGGER.debug("{} recovery done from [{}], took [{}]", request.shardId(), request.sourceNode(), recoveryTime);
                    }
                }

                @Override
                public void handleException(TransportException e) {
                    handleException.accept(e);
                }

                @Override
                public String executor() {
                    // we do some heavy work like refreshes in the response so fork off to the generic threadpool
                    return ThreadPool.Names.GENERIC;
                }

                @Override
                public RecoveryResponse read(StreamInput in) throws IOException {
                    return new RecoveryResponse(in);
                }
            }));
    } catch (CancellableThreads.ExecutionCancelledException e) {
        LOGGER.trace("recovery cancelled", e);
    } catch (Exception e) {
        handleException.accept(e);
    }
}
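The assertion in the inner try block captures the contract this snippet relies on: recoverLocallyUpToGlobalCheckpoint() returns UNASSIGNED_SEQ_NO when the shard could not establish a sequence-number-based starting point from its local store, and otherwise leaves the recovery in the TRANSLOG stage with a concrete startingSeqNo. A condensed sketch of that branch, using only the calls already present above; the comments describe the usual consequence on the source side, which is not shown in this snippet and should be read as an assumption:

    long startingSeqNo = recoveryTarget.indexShard().recoverLocallyUpToGlobalCheckpoint();
    if (startingSeqNo == UNASSIGNED_SEQ_NO) {
        // no usable local starting point: the source will typically fall back to a file-based recovery
    } else {
        // local recovery replayed operations up to the global checkpoint; only operations above
        // startingSeqNo need to be transferred from the source
    }
    StartRecoveryRequest request = getStartRecoveryRequest(LOGGER, clusterService.localNode(), recoveryTarget, startingSeqNo);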