Use of org.apache.lucene.index.LogByteSizeMergePolicy in project elasticsearch by elastic.
From class InternalEngineTests, method testForceMerge:
public void testForceMerge() throws IOException {
    try (Store store = createStore();
         Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
                 new LogByteSizeMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
        // use a log merge policy here; some of the behavior under test is specific to ElasticsearchMergePolicy
        int numDocs = randomIntBetween(10, 100);
        for (int i = 0; i < numDocs; i++) {
            ParsedDocument doc = testParsedDocument(Integer.toString(i), "test", null, testDocument(), B_1, null);
            Engine.Index index = indexForDoc(doc);
            engine.index(index);
            engine.refresh("test");
        }
        try (Engine.Searcher test = engine.acquireSearcher("test")) {
            assertEquals(numDocs, test.reader().numDocs());
        }
        engine.forceMerge(true, 1, false, false, false);
        assertEquals(1, engine.segments(true).size());

        ParsedDocument doc = testParsedDocument(Integer.toString(0), "test", null, testDocument(), B_1, null);
        Engine.Index index = indexForDoc(doc);
        engine.delete(new Engine.Delete(index.type(), index.id(), index.uid()));
        // expunge deletes: onlyExpungeDeletes == true rewrites the segment without the deleted doc
        engine.forceMerge(true, 10, true, false, false);
        assertEquals(1, engine.segments(true).size());
        try (Engine.Searcher test = engine.acquireSearcher("test")) {
            assertEquals(numDocs - 1, test.reader().numDocs());
            // maxDoc dropped together with numDocs because the delete was expunged
            assertEquals(engine.config().getMergePolicy().toString(), numDocs - 1, test.reader().maxDoc());
        }

        doc = testParsedDocument(Integer.toString(1), "test", null, testDocument(), B_1, null);
        index = indexForDoc(doc);
        engine.delete(new Engine.Delete(index.type(), index.id(), index.uid()));
        // regular merge (onlyExpungeDeletes == false): with a single segment already under the
        // maxNumSegments limit nothing is rewritten, so the delete stays marked in the segment
        engine.forceMerge(true, 10, false, false, false);
        assertEquals(1, engine.segments(true).size());
        try (Engine.Searcher test = engine.acquireSearcher("test")) {
            assertEquals(numDocs - 2, test.reader().numDocs());
            assertEquals(numDocs - 1, test.reader().maxDoc());
        }
    }
}
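The two forceMerge calls above differ only in the onlyExpungeDeletes flag (the third argument), and the numDocs versus maxDoc assertions hinge on it. The following standalone Lucene sketch, not part of the test above, shows the same effect at the IndexWriter level; the class name and document layout are illustrative only.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;

public class ExpungeDeletesSketch {
    public static void main(String[] args) throws Exception {
        try (RAMDirectory dir = new RAMDirectory();
             IndexWriter writer = new IndexWriter(dir,
                     new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()))) {
            for (int i = 0; i < 10; i++) {
                Document doc = new Document();
                doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
                writer.addDocument(doc);
            }
            writer.commit();
            writer.deleteDocuments(new Term("id", "0"));
            writer.commit();
            try (DirectoryReader reader = DirectoryReader.open(writer)) {
                // the delete is only marked: 9 live docs, but still 10 doc IDs
                System.out.println(reader.numDocs() + "/" + reader.maxDoc()); // 9/10
            }
            // the plain-Lucene counterpart of forceMerge(..., onlyExpungeDeletes = true, ...)
            writer.forceMergeDeletes();
            try (DirectoryReader reader = DirectoryReader.open(writer)) {
                // the segment was rewritten, so maxDoc dropped together with numDocs
                System.out.println(reader.numDocs() + "/" + reader.maxDoc()); // 9/9
            }
        }
    }
}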
Use of org.apache.lucene.index.LogByteSizeMergePolicy in project elasticsearch by elastic.
From class BitSetFilterCacheTests, method testListener:
public void testListener() throws IOException {
    IndexWriter writer = new IndexWriter(new RAMDirectory(),
            new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()));
    Document document = new Document();
    document.add(new StringField("field", "value", Field.Store.NO));
    writer.addDocument(document);
    writer.commit();
    final DirectoryReader writerReader = DirectoryReader.open(writer);
    // wrapping associates the reader with a shard, so the listener must see a non-null ShardId
    final IndexReader reader = ElasticsearchDirectoryReader.wrap(writerReader, new ShardId("test", "_na_", 0));
    final AtomicLong stats = new AtomicLong();
    final AtomicInteger onCacheCalls = new AtomicInteger();
    final AtomicInteger onRemoveCalls = new AtomicInteger();
    final BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() {
        @Override
        public void onCache(ShardId shardId, Accountable accountable) {
            onCacheCalls.incrementAndGet();
            stats.addAndGet(accountable.ramBytesUsed());
            if (writerReader != reader) {
                assertNotNull(shardId);
                assertEquals("test", shardId.getIndexName());
                assertEquals(0, shardId.id());
            } else {
                assertNull(shardId);
            }
        }

        @Override
        public void onRemoval(ShardId shardId, Accountable accountable) {
            onRemoveCalls.incrementAndGet();
            stats.addAndGet(-accountable.ramBytesUsed());
            if (writerReader != reader) {
                assertNotNull(shardId);
                assertEquals("test", shardId.getIndexName());
                assertEquals(0, shardId.id());
            } else {
                assertNull(shardId);
            }
        }
    });
    BitSetProducer filter = cache.getBitSetProducer(new TermQuery(new Term("field", "value")));
    assertThat(matchCount(filter, reader), equalTo(1));
    assertTrue(stats.get() > 0);
    assertEquals(1, onCacheCalls.get());
    assertEquals(0, onRemoveCalls.get());
    // closing the reader evicts the cached bitset, which must fire onRemoval and zero the stats
    IOUtils.close(reader, writer);
    assertEquals(1, onRemoveCalls.get());
    assertEquals(0, stats.get());
}
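Both tests in this file count hits through a matchCount helper that the snippets do not show. Below is a minimal sketch of what such a helper can look like, assuming only the public BitSetProducer contract: one bitset per segment, which may be null when nothing in that segment matches.

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.join.BitSetProducer;
import org.apache.lucene.util.BitSet;

// sums the cardinality of the per-segment bitsets produced for a reader
private static int matchCount(BitSetProducer producer, IndexReader reader) throws IOException {
    int count = 0;
    for (LeafReaderContext ctx : reader.leaves()) {
        final BitSet bitSet = producer.getBitSet(ctx); // null if no doc in this segment matches
        if (bitSet != null) {
            count += bitSet.cardinality();
        }
    }
    return count;
}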
Use of org.apache.lucene.index.LogByteSizeMergePolicy in project elasticsearch by elastic.
From class BitSetFilterCacheTests, method testInvalidateEntries:
public void testInvalidateEntries() throws Exception {
    IndexWriter writer = new IndexWriter(new RAMDirectory(),
            new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()));
    // three documents in three separate commits, so the index starts out with three segments
    Document document = new Document();
    document.add(new StringField("field", "value", Field.Store.NO));
    writer.addDocument(document);
    writer.commit();

    document = new Document();
    document.add(new StringField("field", "value", Field.Store.NO));
    writer.addDocument(document);
    writer.commit();

    document = new Document();
    document.add(new StringField("field", "value", Field.Store.NO));
    writer.addDocument(document);
    writer.commit();

    DirectoryReader reader = DirectoryReader.open(writer);
    reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test", "_na_", 0));
    IndexSearcher searcher = new IndexSearcher(reader);
    BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() {
        @Override
        public void onCache(ShardId shardId, Accountable accountable) {
        }

        @Override
        public void onRemoval(ShardId shardId, Accountable accountable) {
        }
    });
    BitSetProducer filter = cache.getBitSetProducer(new TermQuery(new Term("field", "value")));
    assertThat(matchCount(filter, reader), equalTo(3));
    // now cached
    assertThat(matchCount(filter, reader), equalTo(3));
    // there are 3 segments, and bitsets are cached per segment
    assertThat(cache.getLoadedFilters().weight(), equalTo(3L));

    writer.forceMerge(1);
    reader.close();
    reader = DirectoryReader.open(writer);
    reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test", "_na_", 0));
    searcher = new IndexSearcher(reader);
    assertThat(matchCount(filter, reader), equalTo(3));
    // now cached
    assertThat(matchCount(filter, reader), equalTo(3));
    // only one segment now, so the cache must hold a single entry
    assertThat(cache.getLoadedFilters().weight(), equalTo(1L));

    reader.close();
    writer.close();
    // neither reader nor writer references any segment of the test index anymore, so the bitset cache must be empty
    assertThat(cache.getLoadedFilters().weight(), equalTo(0L));
}
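The weight assertions work because BitsetFilterCache stores one bitset per segment, keyed on the segment's core reader: three commits yield three segments and three entries, forceMerge(1) collapses them into one, and closing the last reader over the old segments evicts everything. A standalone sketch of just the underlying segment counts, in plain Lucene with no Elasticsearch cache involved:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.store.RAMDirectory;

try (RAMDirectory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir,
             new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()))) {
    for (int i = 0; i < 3; i++) {
        Document doc = new Document();
        doc.add(new StringField("field", "value", Field.Store.NO));
        writer.addDocument(doc);
        writer.commit(); // one commit per document yields one segment each
    }
    try (DirectoryReader before = DirectoryReader.open(writer)) {
        System.out.println(before.leaves().size()); // 3 segments -> three cache entries
    }
    writer.forceMerge(1);
    try (DirectoryReader after = DirectoryReader.open(writer)) {
        System.out.println(after.leaves().size()); // 1 segment -> a single cache entry
    }
}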
Use of org.apache.lucene.index.LogByteSizeMergePolicy in project elasticsearch by elastic.
From class InternalEngineTests, method testSyncedFlush:
public void testSyncedFlush() throws IOException {
    try (Store store = createStore();
         Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
                 new LogByteSizeMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
        final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
        ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
        engine.index(indexForDoc(doc));
        Engine.CommitId commitID = engine.flush();
        assertThat(commitID, equalTo(new Engine.CommitId(store.readLastCommittedSegmentsInfo().getId())));
        // flip a bit in the commit id to simulate a mismatching commit
        byte[] wrongBytes = Base64.getDecoder().decode(commitID.toString());
        wrongBytes[0] = (byte) ~wrongBytes[0];
        Engine.CommitId wrongId = new Engine.CommitId(wrongBytes);
        assertEquals("should fail to sync flush with wrong id (but no docs)",
                Engine.SyncedFlushResult.COMMIT_MISMATCH, engine.syncFlush(syncId + "1", wrongId));
        engine.index(indexForDoc(doc));
        assertEquals("should fail to sync flush with right id but pending doc",
                Engine.SyncedFlushResult.PENDING_OPERATIONS, engine.syncFlush(syncId + "2", commitID));
        commitID = engine.flush();
        assertEquals("should succeed to sync flush with right id and no pending doc",
                Engine.SyncedFlushResult.SUCCESS, engine.syncFlush(syncId, commitID));
        assertEquals(syncId, store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID));
        assertEquals(syncId, engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
    }
}
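Under the hood the sync id travels as user data on the Lucene commit point, which is why the test can read it back through both the store and the engine. A minimal sketch of that round trip in plain Lucene follows; the "sync_id" key mirrors what Engine.SYNC_COMMIT_ID resolves to in this era of the codebase, and setLiveCommitData assumes Lucene 6.2 or later.

import java.util.Collections;
import java.util.Map;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.RAMDirectory;

try (RAMDirectory dir = new RAMDirectory()) {
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
        writer.addDocument(new Document());
        // attach the sync id to the next commit as user data
        writer.setLiveCommitData(Collections.singletonMap("sync_id", "my-sync-id").entrySet());
        writer.commit();
    }
    // read it back from the latest commit point on disk
    Map<String, String> userData = SegmentInfos.readLatestCommit(dir).getUserData();
    System.out.println(userData.get("sync_id")); // my-sync-id
}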
Use of org.apache.lucene.index.LogByteSizeMergePolicy in project elasticsearch by elastic.
From class AbstractFieldDataTestCase, method setup:
@Before
public void setup() throws Exception {
    // we need an index created on 2.x so that fielddata is still allowed on string fields
    Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_0);
    Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
    indexService = createIndex("test", settings);
    mapperService = indexService.mapperService();
    indicesFieldDataCache = getInstanceFromNode(IndicesService.class).getIndicesFieldDataCache();
    ifdService = indexService.fieldData();
    // LogByteSizeMergePolicy to preserve doc ID order
    writer = new IndexWriter(new RAMDirectory(),
            new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()));
}
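The reason the comment insists on LogByteSizeMergePolicy: the LogMergePolicy family only ever merges adjacent segments, so documents keep their insertion order across merges, whereas the default TieredMergePolicy may merge non-adjacent segments and reorder doc IDs. If the policy needs tuning, the usual knobs look like this sketch (the values shown are the Lucene defaults):

LogByteSizeMergePolicy mergePolicy = new LogByteSizeMergePolicy();
mergePolicy.setMergeFactor(10);    // how many same-level segments are merged at once
mergePolicy.setMinMergeMB(1.6);    // segments below this size are bunched into the lowest level
mergePolicy.setMaxMergeMB(2048);   // segments above this size are left alone during normal merging
IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(mergePolicy);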