Use of org.apache.lucene.store.Directory in the elasticsearch project (by elastic):
class MinAggregatorTests, method testMinAggregator_sortedNumericDv.
/**
 * Verifies that the min aggregation computes the global minimum over documents
 * indexed with multi-valued {@code SortedNumericDocValuesField}s on "number".
 */
public void testMinAggregator_sortedNumericDv() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    // Three documents, each carrying two values for the "number" field.
    long[][] perDocValues = { { 9, 7 }, { 5, 3 }, { 1, -1 } };
    for (long[] values : perDocValues) {
        Document doc = new Document();
        for (long value : values) {
            doc.add(new SortedNumericDocValuesField("number", value));
        }
        writer.addDocument(doc);
    }
    writer.close();

    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = newSearcher(reader, true, true);
    MinAggregationBuilder builder = new MinAggregationBuilder("_name").field("number");
    MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
    fieldType.setName("number");
    try (MinAggregator aggregator = createAggregator(builder, searcher, fieldType)) {
        aggregator.preCollection();
        searcher.search(new MatchAllDocsQuery(), aggregator);
        aggregator.postCollection();
        InternalMin min = (InternalMin) aggregator.buildAggregation(0L);
        // Smallest value across all documents and all multi-values is -1.
        assertEquals(-1.0, min.getValue(), 0);
    }
    reader.close();
    dir.close();
}
Use of org.apache.lucene.store.Directory in the elasticsearch project (by elastic):
class ParentFieldSubFetchPhaseTests, method testGetParentId.
/**
 * Checks that {@code ParentFieldSubFetchPhase.getParentId} reads the parent id
 * back out of the parent field's sorted doc values for a given doc id.
 */
public void testGetParentId() throws Exception {
    ParentFieldMapper parentMapper = createParentFieldMapper();
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());

    // Index a single document whose parent field holds the id "1".
    Document doc = new Document();
    doc.add(new SortedDocValuesField(parentMapper.fieldType().name(), new BytesRef("1")));
    writer.addDocument(doc);
    writer.close();

    IndexReader reader = DirectoryReader.open(dir);
    String parentId = ParentFieldSubFetchPhase.getParentId(parentMapper, reader.leaves().get(0).reader(), 0);
    assertEquals("1", parentId);
    reader.close();
    dir.close();
}
Use of org.apache.lucene.store.Directory in the elasticsearch project (by elastic):
class QueryPhaseTests, method countTestCase.
/**
 * Builds a randomly-populated single-segment-per-flush index (merging disabled so
 * deletes stay visible as deletes) and delegates to the per-query overload of
 * {@code countTestCase} for several query shapes. The boolean passed alongside each
 * query indicates whether an exact shortcut count is expected to be unavailable.
 */
private void countTestCase(boolean withDeletions) throws Exception {
    Directory directory = newDirectory();
    IndexWriterConfig config = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
    RandomIndexWriter writer = new RandomIndexWriter(random(), directory, config);
    final int docCount = scaledRandomIntBetween(100, 200);
    for (int i = 0; i < docCount; ++i) {
        Document doc = new Document();
        if (randomBoolean()) {
            doc.add(new StringField("foo", "bar", Store.NO));
        }
        if (randomBoolean()) {
            doc.add(new StringField("foo", "baz", Store.NO));
        }
        // Guarantee at least one tombstoned doc (i == 0) when deletions are requested.
        if (withDeletions && (rarely() || i == 0)) {
            doc.add(new StringField("delete", "yes", Store.NO));
        }
        writer.addDocument(doc);
    }
    if (withDeletions) {
        writer.deleteDocuments(new Term("delete", "yes"));
    }
    final IndexReader reader = writer.getReader();

    Query matchAllQuery = new MatchAllDocsQuery();
    Query constantScoreMatchAll = new ConstantScoreQuery(matchAllQuery);
    Query termQuery = new TermQuery(new Term("foo", "bar"));
    Query constantScoreTerm = new ConstantScoreQuery(termQuery);
    BooleanQuery booleanQuery = new BooleanQuery.Builder()
            .add(matchAllQuery, Occur.SHOULD)
            .add(termQuery, Occur.MUST)
            .build();

    countTestCase(matchAllQuery, reader, false);
    countTestCase(constantScoreMatchAll, reader, false);
    countTestCase(termQuery, reader, withDeletions);
    countTestCase(constantScoreTerm, reader, withDeletions);
    countTestCase(booleanQuery, reader, true);

    reader.close();
    writer.close();
    directory.close();
}
Use of org.apache.lucene.store.Directory in the neo4j project (by neo4j):
class PartitionedIndexStorage, method cleanupLuceneDirectory.
/**
 * Removes content of the lucene directory denoted by the given {@link File file}. This might seem unnecessary
 * since we cleanup the folder using {@link FileSystemAbstraction file system} but in fact for testing we often use
 * in-memory directories whose content can't be removed via the file system.
 * <p>
 * Uses {@link FileUtils#windowsSafeIOOperation(FileUtils.FileOperation)} underneath.
 *
 * @param folder the path to the directory to cleanup.
 * @param zip an optional zip output stream to archive files into; when {@code null} files are deleted without archiving.
 * @param buffer a byte buffer to use for copying bytes from the files into the archive.
 * @throws IOException if removal operation fails.
 */
private void cleanupLuceneDirectory(File folder, ZipOutputStream zip, byte[] buffer) throws IOException {
    try (Directory dir = directoryFactory.open(folder)) {
        String folderName = folder.getName() + "/";
        if (zip != null) {
            // Emit the directory entry first so the archive mirrors the on-disk layout.
            zip.putNextEntry(new ZipEntry(folderName));
            zip.closeEntry();
        }
        String[] indexFiles = dir.listAll();
        for (String indexFile : indexFiles) {
            if (zip != null) {
                zip.putNextEntry(new ZipEntry(folderName + indexFile));
                try (IndexInput input = dir.openInput(indexFile, IOContext.READ)) {
                    long remaining = input.length();
                    while (remaining > 0) {
                        // BUGFIX: compare in long space before narrowing. The previous
                        // "(int) (size - pos)" cast overflowed to a negative value for
                        // files >= 2 GiB, yielding a negative length for readBytes.
                        int chunk = (int) Math.min((long) buffer.length, remaining);
                        input.readBytes(buffer, 0, chunk);
                        zip.write(buffer, 0, chunk);
                        remaining -= chunk;
                    }
                }
                zip.closeEntry();
            }
            // Delete through the retrying helper: on Windows, files may be transiently locked.
            FileUtils.windowsSafeIOOperation(() -> dir.deleteFile(indexFile));
        }
    }
}
Use of org.apache.lucene.store.Directory in the orientdb project (by orientechnologies):
class OLuceneFacetManager, method buildFacetIndexIfNeeded.
/**
 * Opens (creating if absent) the taxonomy writer backing facet indexing when the
 * index metadata declares facet fields, and marks each declared field hierarchical.
 *
 * @throws IOException if the taxonomy directory cannot be opened.
 */
protected void buildFacetIndexIfNeeded() throws IOException {
    if (metadata == null || !metadata.containsField(FACET_FIELDS)) {
        return;
    }
    ODatabaseDocumentInternal database = owner.getDatabase();
    Iterable<String> facetFields = metadata.field(FACET_FIELDS);
    if (facetFields == null) {
        return;
    }
    Directory taxonomyDir = getTaxDirectory(database);
    taxonomyWriter = new DirectoryTaxonomyWriter(taxonomyDir, IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    for (String field : facetFields) {
        // Each iteration overwrites facetField, so only the last listed field is retained.
        facetField = field;
        config.setHierarchical(field, true);
    }
}
Aggregations