Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
Class InternalEngineTests, method testSyncedFlushSurvivesEngineRestart:
@Test
public void testSyncedFlushSurvivesEngineRestart() throws IOException {
    final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    IOUtils.close(store, engine);
    store = createStore();
    engine = createEngine(store, primaryTranslogDir, globalCheckpoint::get);
    final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
    ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null);
    engine.index(indexForDoc(doc));
    globalCheckpoint.set(0L);
    final Engine.CommitId commitID = engine.flush();
    assertEquals("should succeed to flush commit with right id and no pending doc",
                 engine.syncFlush(syncId, commitID), Engine.SyncedFlushResult.SUCCESS);
    assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
    assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
    EngineConfig config = engine.config();
    if (randomBoolean()) {
        engine.close();
    } else {
        engine.flushAndClose();
    }
    if (randomBoolean()) {
        final String translogUUID = Translog.createEmptyTranslog(
            config.getTranslogConfig().getTranslogPath(), UNASSIGNED_SEQ_NO, shardId, primaryTerm.get());
        store.associateIndexWithNewTranslog(translogUUID);
    }
    engine = new InternalEngine(config);
    engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
    assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
}
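All of the snippets on this page build and index their documents the same way; the condensed sketch below isolates that pattern. It reuses only calls that already appear in the snippets (testParsedDocument, testDocumentWithTextField, indexForDoc, engine.index); the per-parameter comments are a best guess based on the upstream Elasticsearch EngineTestCase helper of the same name and may differ slightly in the crate fork.

// Condensed sketch of the ParsedDocument usage pattern shared by these tests.
ParsedDocument doc = testParsedDocument(
    "1",                          // document id
    null,                         // likely the routing value (guess from the upstream helper)
    testDocumentWithTextField(),  // test document containing a single text field
    new BytesArray("{}"),         // raw _source bytes
    null);                        // no dynamic mapping update
Engine.Index operation = indexForDoc(doc);            // wrap the parsed document in an index operation
Engine.IndexResult result = engine.index(operation);  // hand it to the engine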
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
Class InternalEngineTests, method testStressShouldPeriodicallyFlush:
@Test
public void testStressShouldPeriodicallyFlush() throws Exception {
    final long flushThreshold = randomLongBetween(120, 5000);
    final long generationThreshold = randomLongBetween(1000, 5000);
    final IndexSettings indexSettings = engine.config().getIndexSettings();
    final IndexMetadata indexMetadata = IndexMetadata.builder(indexSettings.getIndexMetadata())
        .settings(Settings.builder()
                      .put(indexSettings.getSettings())
                      .put(IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.getKey(), generationThreshold + "b")
                      .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), flushThreshold + "b"))
        .build();
    indexSettings.updateIndexMetadata(indexMetadata);
    engine.onSettingsChanged(indexSettings.getTranslogRetentionAge(),
                             indexSettings.getTranslogRetentionSize(),
                             indexSettings.getSoftDeleteRetentionOperations());
    final int numOps = scaledRandomIntBetween(100, 10_000);
    for (int i = 0; i < numOps; i++) {
        final long localCheckPoint = engine.getProcessedLocalCheckpoint();
        final long seqno = randomLongBetween(Math.max(0, localCheckPoint), localCheckPoint + 5);
        final ParsedDocument doc = testParsedDocument(Long.toString(seqno), null, testDocumentWithTextField(), SOURCE, null);
        engine.index(replicaIndexForDoc(doc, 1L, seqno, false));
        if (rarely() && engine.getTranslog().shouldRollGeneration()) {
            engine.rollTranslogGeneration();
        }
        if (rarely() || engine.shouldPeriodicallyFlush()) {
            engine.flush();
            assertThat(engine.shouldPeriodicallyFlush(), equalTo(false));
        }
    }
}
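The assertion in this test leans on the flush threshold being expressed in bytes: shouldPeriodicallyFlush() should report true once the uncommitted translog grows past index.translog.flush_threshold_size, and false again right after a flush. The snippet below is only a rough back-of-the-envelope illustration of that contract, not the engine's actual (more involved) decision logic; the byte values are made up.

// Illustration only: conceptually the engine compares uncommitted translog bytes against the threshold.
long flushThresholdBytes = 5_000;       // e.g. flush_threshold_size = "5000b", as configured above
long uncommittedTranslogBytes = 6_200;  // bytes appended to the translog since the last commit
boolean flushDue = uncommittedTranslogBytes > flushThresholdBytes;  // true -> a periodic flush is due
// right after engine.flush(), the uncommitted size effectively drops to ~0, so flushDue becomes false again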
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
Class InternalEngineTests, method testUpdateWithFullyDeletedSegments:
@Test
public void testUpdateWithFullyDeletedSegments() throws IOException {
    Settings.Builder settings = Settings.builder()
        .put(defaultSettings.getSettings())
        .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
        .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), Integer.MAX_VALUE);
    final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
    final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
    final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    final Set<String> liveDocs = new HashSet<>();
    try (Store store = createStore();
         InternalEngine engine = createEngine(
             config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) {
        int numDocs = scaledRandomIntBetween(10, 100);
        // index the initial set of documents
        for (int i = 0; i < numDocs; i++) {
            ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null);
            engine.index(indexForDoc(doc));
            liveDocs.add(doc.id());
        }
        // index the same ids again so every document is updated in place,
        // soft-deleting the previous copies and leaving segments that are fully deleted
        for (int i = 0; i < numDocs; i++) {
            ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null);
            engine.index(indexForDoc(doc));
            liveDocs.add(doc.id());
        }
    }
}
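This snippet turns on soft deletes with an effectively unlimited retention count, so the copies displaced by the updates stay in the index for the duration of the test. As a hedged sketch, the same two setting keys used above could be applied outside the test fixture roughly like this (the retention value of 1000 is an arbitrary example, not a recommendation):

// Sketch reusing only the setting keys that appear in the snippet above.
Settings softDeletes = Settings.builder()
    .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)                       // keep deletes as soft deletes
    .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 1000)  // retain up to 1000 operations
    .build();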
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
Class InternalEngineTests, method testBasicCreatedFlag:
@Test
public void testBasicCreatedFlag() throws IOException {
    ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null);
    Engine.Index index = indexForDoc(doc);
    Engine.IndexResult indexResult = engine.index(index);
    assertTrue(indexResult.isCreated());

    index = indexForDoc(doc);
    indexResult = engine.index(index);
    assertFalse(indexResult.isCreated());

    engine.delete(new Engine.Delete("1", newUid(doc), UNASSIGNED_SEQ_NO, primaryTerm.get(),
                                    Versions.MATCH_ANY, VersionType.INTERNAL,
                                    Engine.Operation.Origin.PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0));

    index = indexForDoc(doc);
    indexResult = engine.index(index);
    assertTrue(indexResult.isCreated());
}
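For context, isCreated() distinguishes an insert of a previously unseen (or deleted) id from an in-place update of a live one, which is exactly what the three assertions above exercise. A small hedged sketch of how calling code might branch on it, using only the calls already shown in this snippet:

Engine.IndexResult result = engine.index(indexForDoc(doc));
if (result.isCreated()) {
    // the id was not live in the engine before this call: treat it as an insert
} else {
    // the id already existed: treat it as an update of the existing document
}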
Use of org.elasticsearch.index.mapper.ParsedDocument in project crate by crate.
Class InternalEngineTests, method testConcurrentWritesAndCommits:
// this test writes documents to the engine while concurrently flushing/committing
// and ensures that the commit points contain the correct sequence number data
@Test
public void testConcurrentWritesAndCommits() throws Exception {
    List<Engine.IndexCommitRef> commits = new ArrayList<>();
    try (Store store = createStore();
         InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null))) {
        final int numIndexingThreads = scaledRandomIntBetween(2, 4);
        final int numDocsPerThread = randomIntBetween(500, 1000);
        final CyclicBarrier barrier = new CyclicBarrier(numIndexingThreads + 1);
        final List<Thread> indexingThreads = new ArrayList<>();
        final CountDownLatch doneLatch = new CountDownLatch(numIndexingThreads);
        // create N indexing threads to index documents simultaneously
        for (int threadNum = 0; threadNum < numIndexingThreads; threadNum++) {
            final int threadIdx = threadNum;
            Thread indexingThread = new Thread(() -> {
                try {
                    // wait for all threads to start at the same time
                    barrier.await();
                    // index random number of docs
                    for (int i = 0; i < numDocsPerThread; i++) {
                        final String id = "thread" + threadIdx + "#" + i;
                        ParsedDocument doc = testParsedDocument(id, null, testDocument(), B_1, null);
                        engine.index(indexForDoc(doc));
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                } finally {
                    doneLatch.countDown();
                }
            });
            indexingThreads.add(indexingThread);
        }
        // start the indexing threads
        for (Thread thread : indexingThreads) {
            thread.start();
        }
        // wait for indexing threads to all be ready to start
        barrier.await();
        int commitLimit = randomIntBetween(10, 20);
        long sleepTime = 1;
        // create random commit points
        boolean doneIndexing;
        do {
            doneIndexing = doneLatch.await(sleepTime, TimeUnit.MILLISECONDS);
            commits.add(engine.acquireLastIndexCommit(true));
            if (commits.size() > commitLimit) {
                // don't keep on piling up too many commits
                IOUtils.close(commits.remove(randomIntBetween(0, commits.size() - 1)));
                // increase the wait time so that, if things are slow, we eventually let the indexing threads finish;
                // this reduces pressure on disks and lets threads make progress without piling up too many commits
                sleepTime = sleepTime * 2;
            }
        } while (doneIndexing == false);
        // now, verify all the commits have the correct docs according to the user commit data
        long prevLocalCheckpoint = SequenceNumbers.NO_OPS_PERFORMED;
        long prevMaxSeqNo = SequenceNumbers.NO_OPS_PERFORMED;
        for (Engine.IndexCommitRef commitRef : commits) {
            final IndexCommit commit = commitRef.getIndexCommit();
            Map<String, String> userData = commit.getUserData();
            long localCheckpoint = userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY)
                ? Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY))
                : SequenceNumbers.NO_OPS_PERFORMED;
            long maxSeqNo = userData.containsKey(SequenceNumbers.MAX_SEQ_NO)
                ? Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO))
                : UNASSIGNED_SEQ_NO;
            // local checkpoint and max seq no shouldn't go backwards
            assertThat(localCheckpoint, greaterThanOrEqualTo(prevLocalCheckpoint));
            assertThat(maxSeqNo, greaterThanOrEqualTo(prevMaxSeqNo));
            try (IndexReader reader = DirectoryReader.open(commit)) {
                Long highest = getHighestSeqNo(reader);
                final long highestSeqNo;
                if (highest != null) {
                    highestSeqNo = highest.longValue();
                } else {
                    highestSeqNo = SequenceNumbers.NO_OPS_PERFORMED;
                }
                // make sure localCheckpoint <= highest seq no found <= maxSeqNo
                assertThat(highestSeqNo, greaterThanOrEqualTo(localCheckpoint));
                assertThat(highestSeqNo, lessThanOrEqualTo(maxSeqNo));
                // make sure all sequence numbers up to and including the local checkpoint are in the index
                FixedBitSet seqNosBitSet = getSeqNosSet(reader, highestSeqNo);
                for (int i = 0; i <= localCheckpoint; i++) {
                    assertTrue("local checkpoint [" + localCheckpoint + "], _seq_no [" + i + "] should be indexed",
                               seqNosBitSet.get(i));
                }
            }
            prevLocalCheckpoint = localCheckpoint;
            prevMaxSeqNo = maxSeqNo;
        }
    }
}
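The verification loop above relies on two helpers that are not shown on this page, getHighestSeqNo(reader) and getSeqNosSet(reader, highestSeqNo). As a hedged sketch only (not the project's actual helper), a bitset of the sequence numbers present in a commit could be collected from the _seq_no doc values roughly as follows; it assumes the standard Lucene classes IndexReader, LeafReaderContext, LeafReader, NumericDocValues, Bits, and FixedBitSet are imported.

// Hypothetical getSeqNosSet-style helper: walk every live document in the reader
// and record its _seq_no doc value in a FixedBitSet.
private static FixedBitSet collectSeqNos(IndexReader reader, long highestSeqNo) throws IOException {
    final FixedBitSet seqNos = new FixedBitSet(Math.toIntExact(highestSeqNo + 1));
    for (LeafReaderContext leafContext : reader.leaves()) {
        final LeafReader leaf = leafContext.reader();
        final NumericDocValues values = leaf.getNumericDocValues("_seq_no");
        if (values == null) {
            continue;  // segment has no _seq_no doc values
        }
        final Bits liveDocs = leaf.getLiveDocs();  // null means every doc in the segment is live
        for (int docId = 0; docId < leaf.maxDoc(); docId++) {
            if (liveDocs != null && liveDocs.get(docId) == false) {
                continue;  // skip deleted documents
            }
            if (values.advanceExact(docId)) {
                seqNos.set(Math.toIntExact(values.longValue()));
            }
        }
    }
    return seqNos;
}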