Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
From the class InternalEngineTests, the method testShouldPeriodicallyFlush:
@Test
public void testShouldPeriodicallyFlush() throws Exception {
    assertThat("Empty engine does not need flushing", engine.shouldPeriodicallyFlush(), equalTo(false));
    // A new engine may have more than one empty translog file - the test should account for this extra size.
    final Translog translog = engine.getTranslog();
    final IntSupplier uncommittedTranslogOperationsSinceLastCommit = () -> {
        long localCheckpoint = Long.parseLong(engine.getLastCommittedSegmentInfos().userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
        return translog.totalOperationsByMinGen(translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration);
    };
    final long extraTranslogSizeInNewEngine = engine.getTranslog().stats().getUncommittedSizeInBytes() - Translog.DEFAULT_HEADER_SIZE_IN_BYTES;
    int numDocs = between(10, 100);
    for (int id = 0; id < numDocs; id++) {
        final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null);
        engine.index(indexForDoc(doc));
    }
    assertThat("Not exceeded translog flush threshold yet", engine.shouldPeriodicallyFlush(), equalTo(false));
    long flushThreshold = RandomNumbers.randomLongBetween(random(), 120, engine.getTranslog().stats().getUncommittedSizeInBytes() - extraTranslogSizeInNewEngine);
    final IndexSettings indexSettings = engine.config().getIndexSettings();
    final IndexMetadata indexMetadata = IndexMetadata.builder(indexSettings.getIndexMetadata())
        .settings(Settings.builder()
                      .put(indexSettings.getSettings())
                      .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), flushThreshold + "b"))
        .build();
    indexSettings.updateIndexMetadata(indexMetadata);
    engine.onSettingsChanged(indexSettings.getTranslogRetentionAge(), indexSettings.getTranslogRetentionSize(), indexSettings.getSoftDeleteRetentionOperations());
    assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(numDocs));
    assertThat(engine.shouldPeriodicallyFlush(), equalTo(true));
    engine.flush();
    assertThat(uncommittedTranslogOperationsSinceLastCommit.getAsInt(), equalTo(0));
    // Stale operations are skipped by Lucene but still added to the translog - the engine must still be able to flush.
    for (int id = 0; id < numDocs; id++) {
        final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null);
        final Engine.IndexResult result = engine.index(replicaIndexForDoc(doc, 1L, id, false));
        assertThat(result.isCreated(), equalTo(false));
    }
    SegmentInfos lastCommitInfo = engine.getLastCommittedSegmentInfos();
    assertThat(uncommittedTranslogOperationsSinceLastCommit.getAsInt(), equalTo(numDocs));
    assertThat(engine.shouldPeriodicallyFlush(), equalTo(true));
    engine.flush(false, false);
    assertThat(engine.getLastCommittedSegmentInfos(), not(sameInstance(lastCommitInfo)));
    assertThat(uncommittedTranslogOperationsSinceLastCommit.getAsInt(), equalTo(0));
    // If the new index commit still points to the same translog generation as the current index commit,
    // we should not enable the periodic flush condition; otherwise we can get into an infinite loop of flushes.
    // Create a gap in the sequence numbers here:
    generateNewSeqNo(engine);
    for (int id = 0; id < numDocs; id++) {
        if (randomBoolean()) {
            translog.rollGeneration();
        }
        final ParsedDocument doc = testParsedDocument("new" + id, null, testDocumentWithTextField(), SOURCE, null);
        engine.index(replicaIndexForDoc(doc, 2L, generateNewSeqNo(engine), false));
        if (engine.shouldPeriodicallyFlush()) {
            engine.flush();
            assertThat(engine.getLastCommittedSegmentInfos(), not(sameInstance(lastCommitInfo)));
            assertThat(engine.shouldPeriodicallyFlush(), equalTo(false));
        }
    }
}
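
The decision this test exercises can be shown in isolation. Below is a minimal, self-contained sketch of the threshold check only - flush once the uncommitted translog size reaches the configured limit. The class and record names (PeriodicFlushSketch, TranslogStats) are hypothetical illustrations, not part of the Elasticsearch or CrateDB API:

// Hypothetical sketch of the periodic-flush decision in isolation.
public final class PeriodicFlushSketch {

    // Stand-in for the translog statistics the engine consults.
    record TranslogStats(long uncommittedSizeInBytes, int uncommittedOperations) {}

    // Flush once the uncommitted translog bytes reach the configured threshold,
    // mirroring INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING in the test above.
    static boolean shouldPeriodicallyFlush(TranslogStats stats, long flushThresholdBytes) {
        return stats.uncommittedSizeInBytes() >= flushThresholdBytes;
    }

    public static void main(String[] args) {
        TranslogStats stats = new TranslogStats(2048, 42);
        System.out.println(shouldPeriodicallyFlush(stats, 1024)); // true
        System.out.println(shouldPeriodicallyFlush(stats, 4096)); // false
    }
}

As the last part of the test verifies, the real engine additionally refuses to report a needed flush when a new commit would still point at the same translog generation - the guard against an infinite loop of flushes.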
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
From the class InternalEngineTests, the method testTranslogReplay:
@Test
public void testTranslogReplay() throws IOException {
    final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getProcessedLocalCheckpoint();
    final int numDocs = randomIntBetween(1, 10);
    for (int i = 0; i < numDocs; i++) {
        ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
        Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
        Engine.IndexResult indexResult = engine.index(firstIndexRequest);
        assertThat(indexResult.getVersion(), equalTo(1L));
    }
    assertVisibleCount(engine, numDocs);
    translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings());
    engine.close();
    // we need to reuse the engine config; otherwise parser.mappingModified won't work
    engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier));
    engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
    engine.refresh("warm_up");
    assertVisibleCount(engine, numDocs, false);
    engine.close();
    translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings());
    engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier);
    engine.refresh("warm_up");
    assertVisibleCount(engine, numDocs, false);
    final boolean flush = randomBoolean();
    int randomId = randomIntBetween(numDocs + 1, numDocs + 10);
    ParsedDocument doc = testParsedDocument(Integer.toString(randomId), null, testDocument(), new BytesArray("{}"), null);
    Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, UNASSIGNED_SEQ_NO, 0, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
    Engine.IndexResult indexResult = engine.index(firstIndexRequest);
    assertThat(indexResult.getVersion(), equalTo(1L));
    if (flush) {
        engine.flush();
        engine.refresh("test");
    }
    doc = testParsedDocument(Integer.toString(randomId), null, testDocument(), new BytesArray("{}"), null);
    Engine.Index idxRequest = new Engine.Index(newUid(doc), doc, UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
    Engine.IndexResult result = engine.index(idxRequest);
    engine.refresh("test");
    assertThat(result.getVersion(), equalTo(2L));
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), numDocs + 1);
        assertThat(topDocs.totalHits.value, equalTo(numDocs + 1L));
    }
    engine.close();
    translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings());
    engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier);
    engine.refresh("warm_up");
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), numDocs + 1);
        assertThat(topDocs.totalHits.value, equalTo(numDocs + 1L));
    }
    engine.delete(new Engine.Delete(Integer.toString(randomId), newUid(doc), UNASSIGNED_SEQ_NO, primaryTerm.get(), Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0));
    if (randomBoolean()) {
        engine.close();
        engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier);
    }
    engine.refresh("test");
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), numDocs);
        assertThat(topDocs.totalHits.value, equalTo((long) numDocs));
    }
}
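
The pattern this test repeats - close the engine, reopen it over the same store, then recoverFromTranslog - is write-ahead-log replay. The following is a minimal, self-contained sketch of the replay idea, using hypothetical types (ReplaySketch, Op) rather than the real Engine API:

// Hypothetical sketch: rebuild an in-memory index by replaying a log of
// versioned operations, keeping the highest version seen per document id.
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class ReplaySketch {
    record Op(String id, String source, long version) {}

    static Map<String, Op> replay(List<Op> translog) {
        Map<String, Op> index = new HashMap<>();
        for (Op op : translog) {
            // last-write-wins per document id, decided by version
            index.merge(op.id(), op, (old, cur) -> cur.version() >= old.version() ? cur : old);
        }
        return index;
    }

    public static void main(String[] args) {
        List<Op> log = List.of(new Op("1", "{}", 1), new Op("1", "{\"a\":1}", 2), new Op("2", "{}", 1));
        System.out.println(replay(log).get("1").version()); // prints 2
    }
}

Replaying the same log again yields the same state, which is why the test can reopen the engine several times and still assert the same visible document counts.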
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
From the class InternalEngineTests, the method testTranslogMultipleOperationsSameDocument:
@Test
public void testTranslogMultipleOperationsSameDocument() throws IOException {
    final int ops = randomIntBetween(1, 32);
    Engine initialEngine;
    final List<Engine.Operation> operations = new ArrayList<>();
    try {
        initialEngine = engine;
        for (int i = 0; i < ops; i++) {
            final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null);
            if (randomBoolean()) {
                final Engine.Index operation = new Engine.Index(newUid(doc), doc, UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
                operations.add(operation);
                initialEngine.index(operation);
            } else {
                final Engine.Delete operation = new Engine.Delete("1", newUid(doc), UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0);
                operations.add(operation);
                initialEngine.delete(operation);
            }
        }
    } finally {
        IOUtils.close(engine);
    }
    try (Engine recoveringEngine = new InternalEngine(engine.config())) {
        recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
        recoveringEngine.refresh("test");
        try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) {
            final TotalHitCountCollector collector = new TotalHitCountCollector();
            searcher.search(new MatchAllDocsQuery(), collector);
            assertThat(collector.getTotalHits(), equalTo(operations.get(operations.size() - 1) instanceof Engine.Delete ? 0 : 1));
        }
    }
}
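
The closing assertion boils down to a last-operation-wins rule for a single document id. A tiny self-contained sketch of just that rule, with hypothetical types:

// Hypothetical sketch: after replaying a mixed index/delete history for one
// document id, one document is visible iff the final operation was an index.
import java.util.List;

final class LastOpWinsSketch {
    interface Operation {}
    record Index(String id) implements Operation {}
    record Delete(String id) implements Operation {}

    static int expectedVisibleDocs(List<Operation> history) {
        return history.get(history.size() - 1) instanceof Delete ? 0 : 1;
    }

    public static void main(String[] args) {
        System.out.println(expectedVisibleDocs(List.of(new Index("1"), new Delete("1")))); // 0
        System.out.println(expectedVisibleDocs(List.of(new Delete("1"), new Index("1")))); // 1
    }
}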
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
From the class InternalEngineTests, the method testConcurrentOutOfOrderDocsOnReplica:
@Test
public void testConcurrentOutOfOrderDocsOnReplica() throws IOException, InterruptedException {
    final List<Engine.Operation> opsDoc1 = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 100, 300, "1");
    final Engine.Operation lastOpDoc1 = opsDoc1.get(opsDoc1.size() - 1);
    final String lastFieldValueDoc1;
    if (lastOpDoc1 instanceof Engine.Index) {
        Engine.Index index = (Engine.Index) lastOpDoc1;
        lastFieldValueDoc1 = index.docs().get(0).get("value");
    } else {
        // delete
        lastFieldValueDoc1 = null;
    }
    final List<Engine.Operation> opsDoc2 = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 100, 300, "2");
    final Engine.Operation lastOpDoc2 = opsDoc2.get(opsDoc2.size() - 1);
    final String lastFieldValueDoc2;
    if (lastOpDoc2 instanceof Engine.Index) {
        Engine.Index index = (Engine.Index) lastOpDoc2;
        lastFieldValueDoc2 = index.docs().get(0).get("value");
    } else {
        // delete
        lastFieldValueDoc2 = null;
    }
    // randomly interleave
    final AtomicLong seqNoGenerator = new AtomicLong();
    BiFunction<Engine.Operation, Long, Engine.Operation> seqNoUpdater = (operation, newSeqNo) -> {
        if (operation instanceof Engine.Index) {
            Engine.Index index = (Engine.Index) operation;
            Document doc = testDocumentWithTextField(index.docs().get(0).get("value"));
            ParsedDocument parsedDocument = testParsedDocument(index.id(), index.routing(), doc, index.source(), null);
            return new Engine.Index(index.uid(), parsedDocument, newSeqNo, index.primaryTerm(), index.version(), index.versionType(), index.origin(), index.startTime(), index.getAutoGeneratedIdTimestamp(), index.isRetry(), UNASSIGNED_SEQ_NO, 0);
        } else {
            Engine.Delete delete = (Engine.Delete) operation;
            return new Engine.Delete(delete.id(), delete.uid(), newSeqNo, delete.primaryTerm(), delete.version(), delete.versionType(), delete.origin(), delete.startTime(), UNASSIGNED_SEQ_NO, 0);
        }
    };
    final List<Engine.Operation> allOps = new ArrayList<>();
    Iterator<Engine.Operation> iter1 = opsDoc1.iterator();
    Iterator<Engine.Operation> iter2 = opsDoc2.iterator();
    while (iter1.hasNext() && iter2.hasNext()) {
        final Engine.Operation next = randomBoolean() ? iter1.next() : iter2.next();
        allOps.add(seqNoUpdater.apply(next, seqNoGenerator.getAndIncrement()));
    }
    iter1.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o, seqNoGenerator.getAndIncrement())));
    iter2.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o, seqNoGenerator.getAndIncrement())));
    // insert some duplicates
    randomSubsetOf(allOps).forEach(op -> allOps.add(seqNoUpdater.apply(op, op.seqNo())));
    shuffle(allOps, random());
    concurrentlyApplyOps(allOps, engine);
    engine.refresh("test");
    if (lastFieldValueDoc1 != null) {
        try (Searcher searcher = engine.acquireSearcher("test")) {
            final TotalHitCountCollector collector = new TotalHitCountCollector();
            searcher.search(new TermQuery(new Term("value", lastFieldValueDoc1)), collector);
            assertThat(collector.getTotalHits(), equalTo(1));
        }
    }
    if (lastFieldValueDoc2 != null) {
        try (Searcher searcher = engine.acquireSearcher("test")) {
            final TotalHitCountCollector collector = new TotalHitCountCollector();
            searcher.search(new TermQuery(new Term("value", lastFieldValueDoc2)), collector);
            assertThat(collector.getTotalHits(), equalTo(1));
        }
    }
    int totalExpectedOps = 0;
    if (lastFieldValueDoc1 != null) {
        totalExpectedOps++;
    }
    if (lastFieldValueDoc2 != null) {
        totalExpectedOps++;
    }
    assertVisibleCount(engine, totalExpectedOps);
}
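
The interleaving step in the middle of the test is a reusable pattern on its own: merge two per-document histories into a single stream while a shared counter hands out fresh global sequence numbers, so the merged stream stays gap-free. A self-contained sketch with hypothetical types (InterleaveSketch, Op):

// Hypothetical sketch of the interleaving: consume two operation histories in
// random order, stamping each operation with the next global sequence number.
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicLong;

final class InterleaveSketch {
    record Op(String docId, long seqNo) {}

    static List<Op> interleave(List<String> doc1Ops, List<String> doc2Ops, Random random) {
        AtomicLong seqNoGenerator = new AtomicLong();
        List<Op> all = new ArrayList<>();
        Iterator<String> it1 = doc1Ops.iterator();
        Iterator<String> it2 = doc2Ops.iterator();
        while (it1.hasNext() && it2.hasNext()) {
            // pick the next operation from either history at random
            String next = random.nextBoolean() ? it1.next() : it2.next();
            all.add(new Op(next, seqNoGenerator.getAndIncrement()));
        }
        // drain whichever history is left over
        it1.forEachRemaining(id -> all.add(new Op(id, seqNoGenerator.getAndIncrement())));
        it2.forEachRemaining(id -> all.add(new Op(id, seqNoGenerator.getAndIncrement())));
        return all;
    }

    public static void main(String[] args) {
        interleave(List.of("1", "1"), List.of("2"), new Random())
            .forEach(System.out::println); // seqNos 0, 1, 2 in some random interleaving
    }
}

The test then duplicates a random subset of operations under their original sequence numbers and shuffles everything, checking that the replica converges to the same final state regardless of arrival order.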
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
From the class InternalEngineTests, the method testIndexWriterInfoStream:
// #5891: make sure IndexWriter's infoStream output is
// sent to lucene.iw with log level TRACE:
@Test
public void testIndexWriterInfoStream() throws IllegalAccessException, IOException {
    assumeFalse("who tests the tester?", VERBOSE);
    MockAppender mockAppender = new MockAppender("testIndexWriterInfoStream");
    mockAppender.start();
    Logger rootLogger = LogManager.getRootLogger();
    Level savedLevel = rootLogger.getLevel();
    Loggers.addAppender(rootLogger, mockAppender);
    Loggers.setLevel(rootLogger, Level.DEBUG);
    rootLogger = LogManager.getRootLogger();
    try {
        // First, with DEBUG, which should NOT log IndexWriter output:
        ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null);
        engine.index(indexForDoc(doc));
        engine.flush();
        assertFalse(mockAppender.sawIndexWriterMessage);
        // Again, with TRACE, which should log IndexWriter output:
        Loggers.setLevel(rootLogger, Level.TRACE);
        engine.index(indexForDoc(doc));
        engine.flush();
        assertTrue(mockAppender.sawIndexWriterMessage);
    } finally {
        Loggers.removeAppender(rootLogger, mockAppender);
        mockAppender.stop();
        Loggers.setLevel(rootLogger, savedLevel);
    }
}
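
The MockAppender used above follows a common Log4j2 testing pattern: a custom appender that records whether a message of interest was logged. A minimal sketch of that pattern, assuming Log4j2 2.11+ for the Property[] constructor; this is an illustration, not the test's actual MockAppender implementation:

// Hypothetical sketch: a Log4j2 appender that flags IndexWriter infoStream
// messages, which (per the comment above) are routed to the "lucene.iw"
// logger at level TRACE.
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.Property;

final class CapturingAppender extends AbstractAppender {
    volatile boolean sawIndexWriterMessage = false;

    CapturingAppender(String name) {
        super(name, null, null, true, Property.EMPTY_ARRAY);
    }

    @Override
    public void append(LogEvent event) {
        if (event.getLoggerName().endsWith("lucene.iw")) {
            sawIndexWriterMessage = true;
        }
    }
}

As in the test, such an appender must be start()ed and attached (Loggers.addAppender) before the indexing calls, then removed and stopped in a finally block so later tests are unaffected.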