Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
From class InternalEngineTests, method testRestoreLocalHistoryFromTranslog.
@Test
public void testRestoreLocalHistoryFromTranslog() throws IOException {
    final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    try (Store store = createStore()) {
        final ArrayList<Long> seqNos = new ArrayList<>();
        final int numOps = randomIntBetween(0, 1024);
        for (int i = 0; i < numOps; i++) {
            if (rarely()) {
                continue;
            }
            seqNos.add((long) i);
        }
        Randomness.shuffle(seqNos);
        final EngineConfig engineConfig;
        final SeqNoStats prevSeqNoStats;
        final List<DocIdSeqNoAndSource> prevDocs;
        try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) {
            engineConfig = engine.config();
            for (final long seqNo : seqNos) {
                final String id = Long.toString(seqNo);
                final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null);
                engine.index(replicaIndexForDoc(doc, 1, seqNo, false));
                if (rarely()) {
                    engine.rollTranslogGeneration();
                }
                if (rarely()) {
                    engine.flush();
                }
            }
            globalCheckpoint.set(randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, engine.getPersistedLocalCheckpoint()));
            engine.syncTranslog();
            prevSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get());
            prevDocs = getDocIds(engine, true);
        }
        try (InternalEngine engine = new InternalEngine(engineConfig)) {
            final long currentTranslogGeneration = engine.getTranslog().currentFileGeneration();
            engine.recoverFromTranslog(translogHandler, globalCheckpoint.get());
            engine.restoreLocalHistoryFromTranslog(translogHandler);
            assertThat(getDocIds(engine, true), equalTo(prevDocs));
            SeqNoStats seqNoStats = engine.getSeqNoStats(globalCheckpoint.get());
            assertThat(seqNoStats.getLocalCheckpoint(), equalTo(prevSeqNoStats.getLocalCheckpoint()));
            assertThat(seqNoStats.getMaxSeqNo(), equalTo(prevSeqNoStats.getMaxSeqNo()));
            assertThat("restore from local translog must not add operations to translog",
                       engine.getTranslog().totalOperationsByMinGen(currentTranslogGeneration), equalTo(0));
            assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, createMapperService("test"));
        }
    }
}
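All of these snippets build their ParsedDocument instances through the testParsedDocument helper rather than through the mapper. For orientation, this is a minimal sketch of what that helper typically looks like in the Elasticsearch EngineTestCase base that crate's test derives from; the field set and the exact ParsedDocument constructor signature are assumptions and may differ in the crate fork:

// Sketch only: modeled on Elasticsearch's EngineTestCase#testParsedDocument;
// the ParsedDocument constructor arguments may differ in the crate fork.
protected static ParsedDocument testParsedDocument(String id, String routing,
                                                   ParseContext.Document document,
                                                   BytesReference source, Mapping mappingUpdate) {
    Field uidField = new Field("_id", Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE);
    Field versionField = new NumericDocValuesField("_version", 0);
    SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
    // every engine-level document carries id, version, and sequence-number fields
    document.add(uidField);
    document.add(versionField);
    document.add(seqID.seqNo);
    document.add(seqID.seqNoDocValue);
    document.add(seqID.primaryTerm);
    return new ParsedDocument(versionField, seqID, id, routing,
                              Collections.singletonList(document), source, mappingUpdate);
}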
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
From class InternalEngineTests, method testProducesStoredFieldsReader.
@Test
public void testProducesStoredFieldsReader() throws Exception {
    // Make sure that the engine produces a SequentialStoredFieldsLeafReader.
    // This is required for optimizations on SourceLookup to work, which is in turn useful for runtime fields.
    ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField("test"),
                                            new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
    Engine.Index operation = randomBoolean()
        ? appendOnlyPrimary(doc, false, 1)
        : appendOnlyReplica(doc, false, 1, randomIntBetween(0, 5));
    engine.index(operation);
    engine.refresh("test");
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        IndexReader reader = searcher.getIndexReader();
        assertThat(reader.leaves().size(), Matchers.greaterThanOrEqualTo(1));
        for (LeafReaderContext context : reader.leaves()) {
            assertThat(context.reader(), Matchers.instanceOf(SequentialStoredFieldsLeafReader.class));
            SequentialStoredFieldsLeafReader lf = (SequentialStoredFieldsLeafReader) context.reader();
            assertNotNull(lf.getSequentialStoredFieldsReader());
        }
    }
}
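The sequential reader asserted on here is what lets stored fields be loaded in docID order without the merge-instance overhead of the default reader. A hypothetical usage sketch, assuming Lucene 8's StoredFieldsReader and StoredFieldVisitor API (the method was renamed in later Lucene versions):

// Hypothetical sketch, assuming Lucene 8's StoredFieldsReader API.
StoredFieldsReader sequentialReader = lf.getSequentialStoredFieldsReader();
DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
sequentialReader.visitDocument(0, visitor);         // load stored fields of docID 0
String value = visitor.getDocument().get("value");  // read back the stored text field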
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
From class InternalEngineTests, method testDeleteWithFatalError.
@Test
public void testDeleteWithFatalError() throws Exception {
    final IllegalStateException tragicException = new IllegalStateException("fail to store tombstone");
    try (Store store = createStore()) {
        EngineConfig.TombstoneDocSupplier tombstoneDocSupplier = new EngineConfig.TombstoneDocSupplier() {
            @Override
            public ParsedDocument newDeleteTombstoneDoc(String id) {
                ParsedDocument parsedDocument = tombstoneDocSupplier().newDeleteTombstoneDoc(id);
                parsedDocument.rootDoc().add(new StoredField("foo", "bar") {
                    // this is a hack to add a failure while storing the document, which triggers
                    // a tragic event and in turn fails the engine
                    @Override
                    public BytesRef binaryValue() {
                        throw tragicException;
                    }
                });
                return parsedDocument;
            }

            @Override
            public ParsedDocument newNoopTombstoneDoc(String reason) {
                return tombstoneDocSupplier().newNoopTombstoneDoc(reason);
            }
        };
        EngineConfig config = config(this.engine.config(), store, createTempDir(), tombstoneDocSupplier);
        try (InternalEngine engine = createEngine(config)) {
            final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null);
            engine.index(indexForDoc(doc));
            expectThrows(IllegalStateException.class,
                         () -> engine.delete(new Engine.Delete("1", newUid(doc), UNASSIGNED_SEQ_NO, primaryTerm.get(),
                                                               Versions.MATCH_ANY, VersionType.INTERNAL,
                                                               Engine.Operation.Origin.PRIMARY, System.nanoTime(),
                                                               UNASSIGNED_SEQ_NO, 0)));
            assertTrue(engine.isClosed.get());
            assertSame(tragicException, engine.failedEngine.get());
        }
    }
}
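indexForDoc, used above to build the primary-origin index operation, is another EngineTestCase helper. A sketch of its usual shape; the exact signature may differ in the crate fork:

// Sketch of the EngineTestCase helper; the crate fork may differ.
public static Engine.Index indexForDoc(ParsedDocument doc) {
    // wraps the parsed document in a primary-origin index operation
    return new Engine.Index(newUid(doc), primaryTerm.get(), doc);
}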
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
From class InternalEngineTests, method testTrackMaxSeqNoOfUpdatesOrDeletesOnPrimary.
@Test
public void testTrackMaxSeqNoOfUpdatesOrDeletesOnPrimary() throws Exception {
    engine.close();
    Set<String> liveDocIds = new HashSet<>();
    engine = new InternalEngine(engine.config());
    assertThat(engine.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(-1L));
    int numOps = between(1, 500);
    for (int i = 0; i < numOps; i++) {
        long currentMaxSeqNoOfUpdates = engine.getMaxSeqNoOfUpdatesOrDeletes();
        ParsedDocument doc = createParsedDoc(Integer.toString(between(1, 100)), null);
        if (randomBoolean()) {
            Engine.IndexResult result = engine.index(indexForDoc(doc));
            if (liveDocIds.add(doc.id()) == false) {
                assertThat("update operations on primary must advance max_seq_no_of_updates",
                           engine.getMaxSeqNoOfUpdatesOrDeletes(),
                           equalTo(Math.max(currentMaxSeqNoOfUpdates, result.getSeqNo())));
            } else {
                assertThat("append operations should not advance max_seq_no_of_updates",
                           engine.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(currentMaxSeqNoOfUpdates));
            }
        } else {
            Engine.DeleteResult result = engine.delete(new Engine.Delete(doc.id(), newUid(doc.id()), UNASSIGNED_SEQ_NO,
                                                                         primaryTerm.get(), Versions.MATCH_ANY,
                                                                         VersionType.INTERNAL,
                                                                         Engine.Operation.Origin.PRIMARY,
                                                                         System.nanoTime(), UNASSIGNED_SEQ_NO, 0));
            liveDocIds.remove(doc.id());
            assertThat("delete operations on primary must advance max_seq_no_of_updates",
                       engine.getMaxSeqNoOfUpdatesOrDeletes(),
                       equalTo(Math.max(currentMaxSeqNoOfUpdates, result.getSeqNo())));
        }
    }
}
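createParsedDoc, used above, is a thin convenience wrapper over testParsedDocument. A sketch of its usual shape; the source JSON body shown here is an assumption:

// Sketch only; the source body used by the real helper is an assumption.
public static ParsedDocument createParsedDoc(String id, String routing) {
    return testParsedDocument(id, routing, testDocumentWithTextField(),
                              new BytesArray("{ \"value\" : \"test\" }"), null);
}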
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
From class InternalEngineTests, method testVersionMapAfterAutoIDDocument.
@Test
public void testVersionMapAfterAutoIDDocument() throws IOException {
    engine.refresh("warm_up");
    ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField("test"),
                                            new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
    Engine.Index operation = randomBoolean()
        ? appendOnlyPrimary(doc, false, 1)
        : appendOnlyReplica(doc, false, 1, randomIntBetween(0, 5));
    engine.index(operation);
    assertFalse(engine.isSafeAccessRequired());
    doc = testParsedDocument("1", null, testDocumentWithTextField("updated"),
                             new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
    Engine.Index update = indexForDoc(doc);
    engine.index(update);
    assertTrue(engine.isSafeAccessRequired());
    assertThat(engine.getVersionMap().values(), hasSize(1));
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        assertEquals(0, searcher.getIndexReader().numDocs());
    }
    try (Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL)) {
        assertEquals(1, searcher.getIndexReader().numDocs());
        TopDocs search = searcher.search(new MatchAllDocsQuery(), 1);
        org.apache.lucene.document.Document luceneDoc = searcher.doc(search.scoreDocs[0].doc);
        assertEquals("test", luceneDoc.get("value"));
    }
    // now let's make this document visible
    engine.refresh("test");
    if (randomBoolean()) {
        // random empty refresh
        engine.refresh("test");
    }
    assertTrue("safe access should be required; we carried it over", engine.isSafeAccessRequired());
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        assertEquals(1, searcher.getIndexReader().numDocs());
        TopDocs search = searcher.search(new MatchAllDocsQuery(), 1);
        org.apache.lucene.document.Document luceneDoc = searcher.doc(search.scoreDocs[0].doc);
        assertEquals("updated", luceneDoc.get("value"));
    }
    doc = testParsedDocument("2", null, testDocumentWithTextField("test"),
                             new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
    operation = randomBoolean()
        ? appendOnlyPrimary(doc, false, 1)
        : appendOnlyReplica(doc, false, 1, generateNewSeqNo(engine));
    engine.index(operation);
    assertTrue("safe access should be required", engine.isSafeAccessRequired());
    // now we add this to the map
    assertThat(engine.getVersionMap().values(), hasSize(1));
    engine.refresh("test");
    if (randomBoolean()) {
        // randomly refresh here again
        engine.refresh("test");
    }
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        assertEquals(2, searcher.getIndexReader().numDocs());
    }
    if (operation.origin() == PRIMARY) {
        assertFalse("safe access should NOT be required; last indexing round was append-only", engine.isSafeAccessRequired());
    }
    engine.delete(new Engine.Delete(operation.id(), operation.uid(), UNASSIGNED_SEQ_NO, primaryTerm.get(),
                                    Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY,
                                    System.nanoTime(), UNASSIGNED_SEQ_NO, 0));
    assertTrue("safe access should be required", engine.isSafeAccessRequired());
    engine.refresh("test");
    assertTrue("safe access should be required", engine.isSafeAccessRequired());
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        assertEquals(1, searcher.getIndexReader().numDocs());
    }
}
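The appendOnlyPrimary/appendOnlyReplica helpers used here and in testProducesStoredFieldsReader mark the operation as an auto-generated-ID append via the autoGeneratedIdTimestamp argument, which is what lets the engine skip version-map lookups until a real update arrives. A sketch of their usual shape in the Elasticsearch test base; argument order and version defaults are assumptions and may differ in the crate fork:

// Sketch of the EngineTestCase helpers; exact arguments may differ in the crate fork.
public Engine.Index appendOnlyPrimary(ParsedDocument doc, boolean retry, long autoGeneratedIdTimestamp) {
    // primary-origin append: no sequence number assigned yet, internal versioning
    return new Engine.Index(newUid(doc), doc, UNASSIGNED_SEQ_NO, 1, Versions.MATCH_ANY, VersionType.INTERNAL,
                            Engine.Operation.Origin.PRIMARY, System.nanoTime(),
                            autoGeneratedIdTimestamp, retry, UNASSIGNED_SEQ_NO, 0);
}

public Engine.Index appendOnlyReplica(ParsedDocument doc, boolean retry, long autoGeneratedIdTimestamp, long seqNo) {
    // replica-origin append: replays the sequence number assigned on the primary
    return new Engine.Index(newUid(doc), doc, seqNo, 1, Versions.MATCH_ANY, null,
                            Engine.Operation.Origin.REPLICA, System.nanoTime(),
                            autoGeneratedIdTimestamp, retry, UNASSIGNED_SEQ_NO, 0);
}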