Use of org.apache.lucene.index.IndexWriter in project elasticsearch by elastic.
From the class NumberFieldTypeTests, method doTestDocValueRangeQueries:
public void doTestDocValueRangeQueries(NumberType type, Supplier<Number> valueSupplier) throws Exception {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
    final int numDocs = TestUtil.nextInt(random(), 100, 500);
    for (int i = 0; i < numDocs; ++i) {
        w.addDocument(type.createFields("foo", valueSupplier.get(), true, true, false));
    }
    DirectoryReader reader = DirectoryReader.open(w);
    IndexSearcher searcher = newSearcher(reader);
    w.close();
    final int iters = 10;
    for (int iter = 0; iter < iters; ++iter) {
        Query query = type.rangeQuery("foo",
                random().nextBoolean() ? null : valueSupplier.get(),
                random().nextBoolean() ? null : valueSupplier.get(),
                randomBoolean(), randomBoolean(), true);
        assertThat(query, Matchers.instanceOf(IndexOrDocValuesQuery.class));
        IndexOrDocValuesQuery indexOrDvQuery = (IndexOrDocValuesQuery) query;
        assertEquals(searcher.count(indexOrDvQuery.getIndexQuery()),
                searcher.count(indexOrDvQuery.getRandomAccessQuery()));
    }
    reader.close();
    dir.close();
}
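All of the examples on this page rely on the same near-real-time reader pattern: documents are added through an IndexWriter and a DirectoryReader is opened directly from the live writer with DirectoryReader.open(writer), no commit required. The test above additionally checks that the points-based and doc-values-based halves of an IndexOrDocValuesQuery agree on the hit count. A minimal standalone sketch of that pattern, assuming the usual org.apache.lucene.* imports; the field name "foo" and the temporary directory are illustrative, not taken from the test:

Directory dir = FSDirectory.open(Files.createTempDirectory("nrt-sketch"));
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
Document doc = new Document();
doc.add(new LongPoint("foo", 42L));                    // indexed point: backs the "index" side of an IndexOrDocValuesQuery
doc.add(new SortedNumericDocValuesField("foo", 42L));  // doc values: back the "random access" side
w.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(w);      // NRT reader: sees the document even though nothing was committed
IndexSearcher searcher = new IndexSearcher(reader);
assert searcher.count(LongPoint.newRangeQuery("foo", 0L, 100L)) == 1;
IOUtils.close(reader, w, dir);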
Use of org.apache.lucene.index.IndexWriter in project elasticsearch by elastic.
From the class TypeFieldTypeTests, method testTermsQuery:
public void testTermsQuery() throws Exception {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
    IndexReader reader = openReaderWithNewType("my_type", w);
    TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType();
    ft.setName(TypeFieldMapper.NAME);
    Query query = ft.termQuery("my_type", null);
    assertEquals(new MatchAllDocsQuery(), query.rewrite(reader));
    // Make sure that Lucene actually simplifies the query when there is a single type
    Query userQuery = new PhraseQuery("body", "quick", "fox");
    Query filteredQuery = new BooleanQuery.Builder()
            .add(userQuery, Occur.MUST)
            .add(query, Occur.FILTER)
            .build();
    Query rewritten = new IndexSearcher(reader).rewrite(filteredQuery);
    assertEquals(userQuery, rewritten);
    // ... and does not rewrite it if there is more than one type
    reader.close();
    reader = openReaderWithNewType("my_type2", w);
    Query expected = new ConstantScoreQuery(new BooleanQuery.Builder()
            .add(new TermQuery(new Term(TypeFieldMapper.NAME, "my_type")), Occur.SHOULD)
            .build());
    assertEquals(expected, query.rewrite(reader));
    BytesRef[] types = new BytesRef[] { new BytesRef("my_type"), new BytesRef("my_type2"), new BytesRef("my_type3") };
    // the query should match all documents
    query = new TypeFieldMapper.TypesQuery(types);
    assertEquals(new MatchAllDocsQuery(), query.rewrite(reader));
    reader.close();
    reader = openReaderWithNewType("unknown_type", w);
    // the query cannot rewrite to a match-all-docs query since unknown_type is not queried
    query = new TypeFieldMapper.TypesQuery(types);
    expected = new ConstantScoreQuery(new BooleanQuery.Builder()
            .add(new TermQuery(new Term(TypeFieldMapper.CONTENT_TYPE, types[0])), Occur.SHOULD)
            .add(new TermQuery(new Term(TypeFieldMapper.CONTENT_TYPE, types[1])), Occur.SHOULD)
            .build());
    rewritten = query.rewrite(reader);
    assertEquals(expected, rewritten);
    // make sure that redundant types do not rewrite to MatchAllDocsQuery
    query = new TypeFieldMapper.TypesQuery(new BytesRef("my_type"), new BytesRef("my_type"), new BytesRef("my_type"));
    expected = new ConstantScoreQuery(new BooleanQuery.Builder()
            .add(new TermQuery(new Term(TypeFieldMapper.CONTENT_TYPE, "my_type")), Occur.SHOULD)
            .build());
    rewritten = query.rewrite(reader);
    assertEquals(expected, rewritten);
    IOUtils.close(reader, w, dir);
}
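openReaderWithNewType is a helper from the same test class and its body is not shown on this page. A plausible sketch, consistent with how the test uses it (index one extra document carrying the given _type value, then open a fresh NRT reader); the exact field construction is an assumption:

static DirectoryReader openReaderWithNewType(String type, IndexWriter writer) throws IOException {
    Document doc = new Document();
    doc.add(new StringField(TypeFieldMapper.NAME, type, Field.Store.NO));   // assumed: index the type as a simple term
    writer.addDocument(doc);
    return DirectoryReader.open(writer);                                    // NRT reader now contains the new type
}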
Use of org.apache.lucene.index.IndexWriter in project elasticsearch by elastic.
From the class ScaledFloatFieldTypeTests, method testStats:
public void testStats() throws IOException {
    ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
    ft.setName("scaled_float");
    ft.setScalingFactor(0.1 + randomDouble() * 100);
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
    try (DirectoryReader reader = DirectoryReader.open(w)) {
        assertNull(ft.stats(reader));
    }
    Document doc = new Document();
    doc.add(new StoredField("scaled_float", -1));
    w.addDocument(doc);
    try (DirectoryReader reader = DirectoryReader.open(w)) {
        // field exists, but has no point values
        FieldStats<?> stats = ft.stats(reader);
        assertFalse(stats.hasMinMax());
        assertNull(stats.getMinValue());
        assertNull(stats.getMaxValue());
    }
    LongPoint point = new LongPoint("scaled_float", -1);
    doc.add(point);
    w.addDocument(doc);
    point.setLongValue(10);
    w.addDocument(doc);
    try (DirectoryReader reader = DirectoryReader.open(w)) {
        FieldStats<?> stats = ft.stats(reader);
        assertEquals(-1 / ft.getScalingFactor(), stats.getMinValue());
        assertEquals(10 / ft.getScalingFactor(), stats.getMaxValue());
        assertEquals(3, stats.getMaxDoc());
    }
    w.deleteAll();
    try (DirectoryReader reader = DirectoryReader.open(w)) {
        assertNull(ft.stats(reader));
    }
    IOUtils.close(w, dir);
}
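The divisions by ft.getScalingFactor() in the assertions follow from how scaled_float encodes values: a double is multiplied by the scaling factor and rounded to the nearest long before being indexed, and decoding divides the stored long by the same factor. A small illustrative round trip (the numbers are arbitrary, not taken from the test):

double scalingFactor = 100.0;                          // e.g. two decimal digits of precision
double original = 3.14159;
long encoded = Math.round(original * scalingFactor);   // 314 -- the kind of long that ends up in the LongPoint above
double decoded = encoded / scalingFactor;              // 3.14 -- precision is bounded by the scaling factor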
Use of org.apache.lucene.index.IndexWriter in project elasticsearch by elastic.
From the class ScaledFloatFieldTypeTests, method testFieldData:
public void testFieldData() throws IOException {
    ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
    ft.setScalingFactor(0.1 + randomDouble() * 100);
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
    Document doc = new Document();
    doc.add(new SortedNumericDocValuesField("scaled_float1", 10));
    doc.add(new SortedNumericDocValuesField("scaled_float2", 5));
    doc.add(new SortedNumericDocValuesField("scaled_float2", 12));
    w.addDocument(doc);
    try (DirectoryReader reader = DirectoryReader.open(w)) {
        IndexMetaData indexMetadata = new IndexMetaData.Builder("index")
                .settings(Settings.builder()
                        .put("index.version.created", Version.CURRENT)
                        .put("index.number_of_shards", 1)
                        .put("index.number_of_replicas", 0)
                        .build())
                .build();
        IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY);
        // single-valued
        ft.setName("scaled_float1");
        IndexNumericFieldData fielddata = (IndexNumericFieldData) ft.fielddataBuilder().build(indexSettings, ft, null, null, null);
        assertEquals(fielddata.getNumericType(), IndexNumericFieldData.NumericType.DOUBLE);
        AtomicNumericFieldData leafFieldData = fielddata.load(reader.leaves().get(0));
        SortedNumericDoubleValues values = leafFieldData.getDoubleValues();
        values.setDocument(0);
        assertEquals(1, values.count());
        assertEquals(10 / ft.getScalingFactor(), values.valueAt(0), 10e-5);
        // multi-valued
        ft.setName("scaled_float2");
        fielddata = (IndexNumericFieldData) ft.fielddataBuilder().build(indexSettings, ft, null, null, null);
        leafFieldData = fielddata.load(reader.leaves().get(0));
        values = leafFieldData.getDoubleValues();
        values.setDocument(0);
        assertEquals(2, values.count());
        assertEquals(5 / ft.getScalingFactor(), values.valueAt(0), 10e-5);
        assertEquals(12 / ft.getScalingFactor(), values.valueAt(1), 10e-5);
    }
    IOUtils.close(w, dir);
}
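For comparison, the raw multi-valued doc values can also be read with plain Lucene rather than through the fielddata abstraction. A short sketch, assuming the reader and ft from the snippet above and the Lucene 6.x-era doc-values API (setDocument/count/valueAt) that these tests use:

LeafReader leaf = reader.leaves().get(0).reader();
SortedNumericDocValues raw = DocValues.getSortedNumeric(leaf, "scaled_float2");
raw.setDocument(0);
for (int i = 0; i < raw.count(); i++) {
    long encoded = raw.valueAt(i);                      // 5, then 12 -- the longs indexed above
    double decoded = encoded / ft.getScalingFactor();   // the same division the fielddata applies
}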
Use of org.apache.lucene.index.IndexWriter in project elasticsearch by elastic.
From the class StoreTests, method testCanOpenIndex:
public void testCanOpenIndex() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    IndexWriterConfig iwc = newIndexWriterConfig();
    Path tempDir = createTempDir();
    final BaseDirectoryWrapper dir = newFSDirectory(tempDir);
    assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    IndexWriter writer = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();
    writer.close();
    assertTrue(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    DirectoryService directoryService = new DirectoryService(shardId, INDEX_SETTINGS) {

        @Override
        public Directory newDirectory() throws IOException {
            return dir;
        }
    };
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    store.markStoreCorrupted(new CorruptIndexException("foo", "bar"));
    assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    store.close();
}
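Store.canOpenIndex and markStoreCorrupted are Elasticsearch internals. At the Lucene level the check amounts roughly to: segment files exist and a DirectoryReader can be opened without a CorruptIndexException; Elasticsearch additionally records corruption in a marker file inside the store, which is why the final assertFalse holds even though the Lucene files themselves were never touched. A rough, hypothetical approximation of the Lucene-level part (not the actual implementation):

boolean openable = false;
try (Directory d = FSDirectory.open(tempDir)) {
    if (DirectoryReader.indexExists(d)) {
        try (DirectoryReader r = DirectoryReader.open(d)) {
            openable = true;                              // index exists and opens cleanly
        } catch (CorruptIndexException e) {
            openable = false;                             // corrupted segments => not openable
        }
    }
}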