Use of org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO in project OpenSearch by opensearch-project.
From class InternalEngineTests, method testSyncedFlushSurvivesEngineRestart:
public void testSyncedFlushSurvivesEngineRestart() throws IOException {
    final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    IOUtils.close(store, engine);
    SetOnce<IndexWriter> indexWriterHolder = new SetOnce<>();
    IndexWriterFactory indexWriterFactory = (directory, iwc) -> {
        indexWriterHolder.set(new IndexWriter(directory, iwc));
        return indexWriterHolder.get();
    };
    store = createStore();
    engine = createEngine(
        defaultSettings, store, primaryTranslogDir, newMergePolicy(), indexWriterFactory, null, globalCheckpoint::get);
    final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
    ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null);
    engine.index(indexForDoc(doc));
    globalCheckpoint.set(0L);
    engine.flush();
    // Stamp the sync id into the last commit and verify it is recorded.
    syncFlush(indexWriterHolder.get(), engine, syncId);
    assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
    EngineConfig config = engine.config();
    if (randomBoolean()) {
        engine.close();
    } else {
        engine.flushAndClose();
    }
    if (randomBoolean()) {
        // Optionally replace the translog with a fresh, empty one; the sync id must still survive.
        final String translogUUID = Translog.createEmptyTranslog(
            config.getTranslogConfig().getTranslogPath(), UNASSIGNED_SEQ_NO, shardId, primaryTerm.get());
        store.associateIndexWithNewTranslog(translogUUID);
    }
    // Restart the engine and recover; the sync commit id must still be present.
    engine = new InternalEngine(config);
    engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
    assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
}
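The syncFlush helper is not part of the snippet; it comes from the surrounding test class. A minimal sketch of what such a helper might look like, assuming the test class's imports and direct access to the engine's IndexWriter (the locking details of the real helper are omitted, so treat this as illustrative, not as the OpenSearch implementation):

// Hedged sketch: write a sync id into the Lucene commit user data and commit.
// Engine.SYNC_COMMIT_ID is the user-data key the test asserts on above.
void syncFlush(IndexWriter indexWriter, InternalEngine engine, String syncId) throws IOException {
    final Map<String, String> userData = new HashMap<>();
    final Iterable<Map.Entry<String, String>> liveCommitData = indexWriter.getLiveCommitData();
    if (liveCommitData != null) {
        liveCommitData.forEach(e -> userData.put(e.getKey(), e.getValue()));
    }
    userData.put(Engine.SYNC_COMMIT_ID, syncId);
    indexWriter.setLiveCommitData(userData.entrySet());
    indexWriter.commit();
}

Because the sync id lives in the commit user data rather than in the translog, both a plain restart and a restart with a freshly created translog (the UNASSIGNED_SEQ_NO branch above) must preserve it.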
Use of org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO in project OpenSearch by opensearch-project.
From class InternalEngineTests, method testTrimUnsafeCommits:
public void testTrimUnsafeCommits() throws Exception {
    final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    final int maxSeqNo = 40;
    final List<Long> seqNos = LongStream.rangeClosed(0, maxSeqNo).boxed().collect(Collectors.toList());
    Collections.shuffle(seqNos, random());
    try (Store store = createStore()) {
        EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get);
        final List<Long> commitMaxSeqNo = new ArrayList<>();
        final long minTranslogGen;
        try (InternalEngine engine = createEngine(config)) {
            // Index the shuffled sequence numbers as replica operations, flushing at random points
            // and recording the max seq no contained in each commit.
            for (int i = 0; i < seqNos.size(); i++) {
                ParsedDocument doc = testParsedDocument(Long.toString(seqNos.get(i)), null, testDocument(), new BytesArray("{}"), null);
                Engine.Index index = new Engine.Index(
                    newUid(doc), doc, seqNos.get(i), 0, 1, null, REPLICA, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
                engine.index(index);
                if (randomBoolean()) {
                    engine.flush();
                    final Long maxSeqNoInCommit = seqNos.subList(0, i + 1).stream().max(Long::compareTo).orElse(-1L);
                    commitMaxSeqNo.add(maxSeqNoInCommit);
                }
            }
            globalCheckpoint.set(randomInt(maxSeqNo));
            engine.syncTranslog();
            minTranslogGen = engine.getTranslog().getMinFileGeneration();
        }
        store.trimUnsafeCommits(globalCheckpoint.get(), minTranslogGen, config.getIndexSettings().getIndexVersionCreated());
        // The surviving commit must be the last one whose max seq no is at or below the global checkpoint.
        long safeMaxSeqNo = commitMaxSeqNo.stream()
            .filter(s -> s <= globalCheckpoint.get())
            .reduce((s1, s2) -> s2) // keep the last one
            .orElse(SequenceNumbers.NO_OPS_PERFORMED);
        final List<IndexCommit> commits = DirectoryReader.listCommits(store.directory());
        assertThat(commits, hasSize(1));
        assertThat(commits.get(0).getUserData().get(SequenceNumbers.MAX_SEQ_NO), equalTo(Long.toString(safeMaxSeqNo)));
        try (IndexReader reader = DirectoryReader.open(commits.get(0))) {
            for (LeafReaderContext context : reader.leaves()) {
                final NumericDocValues values = context.reader().getNumericDocValues(SeqNoFieldMapper.NAME);
                if (values != null) {
                    for (int docID = 0; docID < context.reader().maxDoc(); docID++) {
                        if (values.advanceExact(docID) == false) {
                            throw new AssertionError("Document does not have a seq number: " + docID);
                        }
                        assertThat(values.longValue(), lessThanOrEqualTo(globalCheckpoint.get()));
                    }
                }
            }
        }
    }
}
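The assertions above encode the safe-commit rule: after trimming, the only commit left is the newest one whose recorded MAX_SEQ_NO does not exceed the global checkpoint. A hedged sketch of that selection rule in isolation (the helper name findSafeCommit is illustrative, not an OpenSearch API):

// Illustrative only: pick the newest commit whose max seq no is at or below
// the global checkpoint. DirectoryReader.listCommits returns commits oldest-first.
static IndexCommit findSafeCommit(List<IndexCommit> commits, long globalCheckpoint) throws IOException {
    IndexCommit safe = commits.get(0);
    for (IndexCommit commit : commits) {
        final long maxSeqNo = Long.parseLong(commit.getUserData().get(SequenceNumbers.MAX_SEQ_NO));
        if (maxSeqNo <= globalCheckpoint) {
            safe = commit;
        }
    }
    return safe;
}

Commits newer than the safe one may contain operations above the global checkpoint that were never acknowledged, which is why the test also checks that every surviving document's seq no is at or below the checkpoint.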
Use of org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO in project OpenSearch by opensearch-project.
From class InternalEngineTests, method testTreatDocumentFailureAsFatalError:
public void testTreatDocumentFailureAsFatalError() throws Exception {
    AtomicReference<IOException> addDocException = new AtomicReference<>();
    // Wrap the IndexWriter so a queued exception is thrown once on the next addDocument call.
    IndexWriterFactory indexWriterFactory = (dir, iwc) -> new IndexWriter(dir, iwc) {
        @Override
        public long addDocument(Iterable<? extends IndexableField> doc) throws IOException {
            final IOException ex = addDocException.getAndSet(null);
            if (ex != null) {
                throw ex;
            }
            return super.addDocument(doc);
        }
    };
    try (
        Store store = createStore();
        InternalEngine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, indexWriterFactory)
    ) {
        final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null);
        // Non-primary origins: a document failure here must fail the engine rather than be
        // returned to the caller as a per-document result.
        Engine.Operation.Origin origin = randomFrom(REPLICA, LOCAL_RESET, PEER_RECOVERY);
        Engine.Index index = new Engine.Index(
            newUid(doc), doc, randomNonNegativeLong(), primaryTerm.get(), randomNonNegativeLong(), null, origin,
            System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM);
        addDocException.set(new IOException("simulated"));
        expectThrows(IOException.class, () -> engine.index(index));
        assertTrue(engine.isClosed.get());
        assertNotNull(engine.failedEngine.get());
    }
}
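The one-shot injection via AtomicReference.getAndSet(null) is worth calling out: the queued exception fires exactly once, so any writes the engine performs after the failure proceed normally. The same pattern works for any wrapped dependency; a self-contained, hedged sketch of the pattern on its own (plain Java, not an OpenSearch API):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;

// Generic one-shot fault injector (illustrative).
final class OneShotFault {
    private final AtomicReference<IOException> next = new AtomicReference<>();

    void arm(IOException ex) {
        next.set(ex);
    }

    // Throws at most once per arm() call, then disarms itself.
    void maybeThrow() throws IOException {
        final IOException ex = next.getAndSet(null);
        if (ex != null) {
            throw ex;
        }
    }
}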
Use of org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO in project OpenSearch by opensearch-project.
From class InternalEngineTests, method testConcurrentOutOfOrderDocsOnReplica:
public void testConcurrentOutOfOrderDocsOnReplica() throws IOException, InterruptedException {
    final List<Engine.Operation> opsDoc1 = generateSingleDocHistory(
        true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), 2, 100, 300, "1");
    final Engine.Operation lastOpDoc1 = opsDoc1.get(opsDoc1.size() - 1);
    final String lastFieldValueDoc1;
    if (lastOpDoc1 instanceof Engine.Index) {
        Engine.Index index = (Engine.Index) lastOpDoc1;
        lastFieldValueDoc1 = index.docs().get(0).get("value");
    } else {
        // delete
        lastFieldValueDoc1 = null;
    }
    final List<Engine.Operation> opsDoc2 = generateSingleDocHistory(
        true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), 2, 100, 300, "2");
    final Engine.Operation lastOpDoc2 = opsDoc2.get(opsDoc2.size() - 1);
    final String lastFieldValueDoc2;
    if (lastOpDoc2 instanceof Engine.Index) {
        Engine.Index index = (Engine.Index) lastOpDoc2;
        lastFieldValueDoc2 = index.docs().get(0).get("value");
    } else {
        // delete
        lastFieldValueDoc2 = null;
    }
    // Randomly interleave the two histories, assigning fresh sequence numbers as we go.
    final AtomicLong seqNoGenerator = new AtomicLong();
    BiFunction<Engine.Operation, Long, Engine.Operation> seqNoUpdater = (operation, newSeqNo) -> {
        if (operation instanceof Engine.Index) {
            Engine.Index index = (Engine.Index) operation;
            Document doc = testDocumentWithTextField(index.docs().get(0).get("value"));
            ParsedDocument parsedDocument = testParsedDocument(index.id(), index.routing(), doc, index.source(), null);
            return new Engine.Index(
                index.uid(), parsedDocument, newSeqNo, index.primaryTerm(), index.version(), index.versionType(),
                index.origin(), index.startTime(), index.getAutoGeneratedIdTimestamp(), index.isRetry(),
                UNASSIGNED_SEQ_NO, 0);
        } else {
            Engine.Delete delete = (Engine.Delete) operation;
            return new Engine.Delete(
                delete.type(), delete.id(), delete.uid(), newSeqNo, delete.primaryTerm(), delete.version(),
                delete.versionType(), delete.origin(), delete.startTime(), UNASSIGNED_SEQ_NO, 0);
        }
    };
    final List<Engine.Operation> allOps = new ArrayList<>();
    Iterator<Engine.Operation> iter1 = opsDoc1.iterator();
    Iterator<Engine.Operation> iter2 = opsDoc2.iterator();
    while (iter1.hasNext() && iter2.hasNext()) {
        final Engine.Operation next = randomBoolean() ? iter1.next() : iter2.next();
        allOps.add(seqNoUpdater.apply(next, seqNoGenerator.getAndIncrement()));
    }
    iter1.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o, seqNoGenerator.getAndIncrement())));
    iter2.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o, seqNoGenerator.getAndIncrement())));
    // Insert some duplicates to simulate retries, then shuffle so operations arrive out of order.
    randomSubsetOf(allOps).forEach(op -> allOps.add(seqNoUpdater.apply(op, op.seqNo())));
    shuffle(allOps, random());
    concurrentlyApplyOps(allOps, engine);
    engine.refresh("test");
    if (lastFieldValueDoc1 != null) {
        try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
            final TotalHitCountCollector collector = new TotalHitCountCollector();
            searcher.search(new TermQuery(new Term("value", lastFieldValueDoc1)), collector);
            assertThat(collector.getTotalHits(), equalTo(1));
        }
    }
    if (lastFieldValueDoc2 != null) {
        try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
            final TotalHitCountCollector collector = new TotalHitCountCollector();
            searcher.search(new TermQuery(new Term("value", lastFieldValueDoc2)), collector);
            assertThat(collector.getTotalHits(), equalTo(1));
        }
    }
    // Each document whose history ends with an index (not a delete) should be visible exactly once.
    int totalExpectedOps = 0;
    if (lastFieldValueDoc1 != null) {
        totalExpectedOps++;
    }
    if (lastFieldValueDoc2 != null) {
        totalExpectedOps++;
    }
    assertVisibleCount(engine, totalExpectedOps);
}
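concurrentlyApplyOps is a test-harness helper not shown in the snippet; it replays the operation list against the engine from several threads. A simplified, single-threaded sketch of the dispatch step it performs for each operation, hedged as an assumption about the harness (only the two operation types produced by seqNoUpdater above are handled):

// Illustrative dispatch of replicated operations by type.
static void applyOperation(Engine engine, Engine.Operation op) throws IOException {
    if (op instanceof Engine.Index) {
        engine.index((Engine.Index) op);
    } else if (op instanceof Engine.Delete) {
        engine.delete((Engine.Delete) op);
    }
}

Because every replayed operation carries an explicit sequence number and UNASSIGNED_SEQ_NO as the if-seq-no precondition, the engine resolves duplicates and out-of-order arrivals by sequence number alone, which is exactly what the final visibility assertions check.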
Use of org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO in project OpenSearch by opensearch-project.
From class PeerRecoveryRetentionLeaseExpiryTests, method setUpReplicationTracker:
@Before
public void setUpReplicationTracker() throws InterruptedException {
    final AllocationId primaryAllocationId = AllocationId.newInitializing();
    currentTimeMillis = new AtomicLong(randomLongBetween(0, 1024));
    if (randomBoolean()) {
        settings = Settings.builder()
            .put(
                IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(),
                TimeValue.timeValueMillis(randomLongBetween(1, TimeValue.timeValueHours(12).millis())))
            .build();
    } else {
        settings = Settings.EMPTY;
    }
    // must be set in each test
    safeCommitInfo = null;
    final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE);
    replicationTracker = new ReplicationTracker(
        new ShardId("test", "_na", 0),
        primaryAllocationId.getId(),
        IndexSettingsModule.newIndexSettings("test", settings),
        primaryTerm,
        UNASSIGNED_SEQ_NO,
        value -> {},
        currentTimeMillis::get,
        (leases, listener) -> {},
        () -> safeCommitInfo);
    replicationTracker.updateFromMaster(1L, Collections.singleton(primaryAllocationId.getId()), routingTable(Collections.emptySet(), primaryAllocationId));
    replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED);
    final AllocationId replicaAllocationId = AllocationId.newInitializing();
    final IndexShardRoutingTable routingTableWithReplica = routingTable(Collections.singleton(replicaAllocationId), primaryAllocationId);
    replicationTracker.updateFromMaster(2L, Collections.singleton(primaryAllocationId.getId()), routingTableWithReplica);
    replicationTracker.addPeerRecoveryRetentionLease(
        routingTableWithReplica.getByAllocationId(replicaAllocationId.getId()).currentNodeId(), randomCheckpoint(), EMPTY_LISTENER);
    replicationTracker.initiateTracking(replicaAllocationId.getId());
    replicationTracker.markAllocationIdAsInSync(replicaAllocationId.getId(), randomCheckpoint());
}
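randomCheckpoint() and EMPTY_LISTENER are fixtures of the surrounding test class and are not shown in the snippet. A plausible shape for the checkpoint helper, offered purely as an assumption (the real fixture may differ):

// Assumed helper: either "no operations yet" or an arbitrary valid checkpoint.
private long randomCheckpoint() {
    return randomBoolean() ? SequenceNumbers.NO_OPS_PERFORMED : randomNonNegativeLong();
}

Note that the tracker itself is constructed with UNASSIGNED_SEQ_NO as its initial global checkpoint; it only takes on a real checkpoint once primary mode is activated and replicas report in.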