Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by Apache.
In class TestTermVectorsWriter, method testTermVectorCorruption:
// LUCENE-1168
public void testTermVectorCorruption() throws IOException {
  Directory dir = newDirectory();
  for (int iter = 0; iter < 2; iter++) {
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
        .setMaxBufferedDocs(2)
        .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
        .setMergeScheduler(new SerialMergeScheduler())
        .setMergePolicy(new LogDocMergePolicy()));
    Document document = new Document();
    FieldType customType = new FieldType();
    customType.setStored(true);
    Field storedField = newField("stored", "stored", customType);
    document.add(storedField);
    writer.addDocument(document);
    writer.addDocument(document);
    document = new Document();
    document.add(storedField);
    FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
    customType2.setStoreTermVectors(true);
    customType2.setStoreTermVectorPositions(true);
    customType2.setStoreTermVectorOffsets(true);
    Field termVectorField = newField("termVector", "termVector", customType2);
    document.add(termVectorField);
    writer.addDocument(document);
    writer.forceMerge(1);
    writer.close();
    IndexReader reader = DirectoryReader.open(dir);
    for (int i = 0; i < reader.numDocs(); i++) {
      reader.document(i);
      reader.getTermVectors(i);
    }
    reader.close();
    writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
        .setMaxBufferedDocs(2)
        .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
        .setMergeScheduler(new SerialMergeScheduler())
        .setMergePolicy(new LogDocMergePolicy()));
    Directory[] indexDirs = { new MockDirectoryWrapper(random(), TestUtil.ramCopyOf(dir)) };
    writer.addIndexes(indexDirs);
    writer.forceMerge(1);
    writer.close();
  }
  dir.close();
}
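For context on the examples in this page: MockDirectoryWrapper is the checking Directory of the Lucene test framework. It wraps any real Directory and, among other things, fails the test on close() if files or handles were leaked, and can simulate crashes and full disks. A minimal sketch of the wrapping pattern used above, assuming the test framework's random() and TestUtil helpers (setMaxSizeInBytes is one of its optional knobs):

// Wrap an in-memory copy of an existing index; on close() the wrapper
// verifies that no files or open handles were leaked by the test.
Directory ramCopy = TestUtil.ramCopyOf(dir);
MockDirectoryWrapper wrapped = new MockDirectoryWrapper(random(), ramCopy);
wrapped.setMaxSizeInBytes(1 << 20); // optionally simulate a nearly-full disk
// ... index into and read from `wrapped` like any Directory ...
wrapped.close();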
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by Apache.
In class TestSloppyPhraseQuery, method checkPhraseQuery:
private float checkPhraseQuery(Document doc, PhraseQuery query, int slop, int expectedNumResults) throws Exception {
  PhraseQuery.Builder builder = new PhraseQuery.Builder();
  Term[] terms = query.getTerms();
  int[] positions = query.getPositions();
  for (int i = 0; i < terms.length; ++i) {
    builder.add(terms[i], positions[i]);
  }
  builder.setSlop(slop);
  query = builder.build();
  MockDirectoryWrapper ramDir = new MockDirectoryWrapper(random(), new RAMDirectory());
  RandomIndexWriter writer = new RandomIndexWriter(random(), ramDir, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
  writer.addDocument(doc);
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  MaxFreqCollector c = new MaxFreqCollector();
  searcher.search(query, c);
  assertEquals("slop: " + slop + " query: " + query + " doc: " + doc + " Wrong number of hits", expectedNumResults, c.totalHits);
  // QueryUtils.check(query, searcher);
  writer.close();
  reader.close();
  ramDir.close();
  // Return the max freq found: index stats vary with these different
  // tokens/distributions/lengths, and asserting on them directly would
  // make this test very fragile.
  return c.max;
}
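MaxFreqCollector is a private helper inside TestSloppyPhraseQuery and is not shown in this snippet. A plausible sketch, not the exact upstream code, assuming the Collector API of the same Lucene era (Scorer.freq() and Collector.needsScores() existed through 7.x):

// Counts hits and remembers the largest term frequency the scorer reports.
static class MaxFreqCollector extends SimpleCollector {
  float max;
  int totalHits;
  private Scorer scorer;

  @Override
  public void setScorer(Scorer scorer) {
    this.scorer = scorer;
  }

  @Override
  public void collect(int doc) throws IOException {
    totalHits++;
    max = Math.max(max, scorer.freq());
  }

  @Override
  public boolean needsScores() {
    return true; // freq() is only available when scoring is enabled
  }
}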
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by Apache.
In class MockDirectoryFactory, method create:
@Override
protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
  // we ignore the given lock factory
  Directory dir = LuceneTestCase.newDirectory();
  Directory cdir = reduce(dir);
  cdir = reduce(cdir);
  cdir = reduce(cdir);
  if (cdir instanceof MockDirectoryWrapper) {
    MockDirectoryWrapper mockDirWrapper = (MockDirectoryWrapper) cdir;
    // We can't currently do this check because of how Solr has to reboot a
    // new Directory sometimes when replicating or rolling back: the old
    // directory is closed, and the following test assumes it can open an
    // IndexWriter when that happens - we have a new Directory for the same
    // dir and still an open IW at this point.
    mockDirWrapper.setAssertNoUnrefencedFilesOnClose(false);
    // RAM dirs in cores that are restarted end up empty, and the check-index
    // pass fails.
    mockDirWrapper.setCheckIndexOnClose(false);
    if (allowReadingFilesStillOpenForWrite) {
      mockDirWrapper.setAllowReadingFilesStillOpenForWrite(true);
    }
  }
  return dir;
}
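The reduce helper is likewise not shown. In spirit it peels off one layer of delegating Directory per call, which is why create applies it three times. A sketch under the assumption that NRTCachingDirectory and TrackingDirectoryWrapper are the wrapper types in play:

// Unwrap one level of delegation, if present; repeated calls walk down
// nested wrappers until a non-delegating Directory is reached.
private Directory reduce(Directory dir) {
  Directory cdir = dir;
  if (cdir instanceof NRTCachingDirectory) {
    cdir = ((NRTCachingDirectory) cdir).getDelegate();
  }
  if (cdir instanceof TrackingDirectoryWrapper) {
    cdir = ((TrackingDirectoryWrapper) cdir).getDelegate();
  }
  return cdir;
}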
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by Apache.
In class MockFSDirectoryFactory, method create:
@Override
public Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
  // pass the given lock factory through to the underlying FSDirectory
  Directory dir = LuceneTestCase.newFSDirectory(new File(path).toPath(), lockFactory);
  // We can't currently do this check because of how Solr has to reboot a
  // new Directory sometimes when replicating or rolling back: the old
  // directory is closed, and the following test assumes it can open an
  // IndexWriter when that happens - we have a new Directory for the same
  // dir and still an open IW at this point.
  Directory cdir = reduce(dir);
  cdir = reduce(cdir);
  cdir = reduce(cdir);
  if (cdir instanceof MockDirectoryWrapper) {
    ((MockDirectoryWrapper) cdir).setAssertNoUnrefencedFilesOnClose(false);
  }
  return dir;
}
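Solr's test infrastructure usually selects one of these factories indirectly rather than instantiating them by hand. A sketch of the common wiring, assuming the solr.directoryFactory system property that test solrconfig files reference:

// Choose the mock factory before any core is created; a test solrconfig.xml
// can then pick it up via <directoryFactory class="${solr.directoryFactory}"/>.
System.setProperty("solr.directoryFactory", "org.apache.solr.core.MockDirectoryFactory");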
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by Apache.
In class BaseCompoundFormatTestCase, method testManySubFiles:
// Make sure we don't somehow use more than 1 descriptor
// when reading a CFS with many subs:
public void testManySubFiles() throws IOException {
  final MockDirectoryWrapper dir = newMockFSDirectory(createTempDir("CFSManySubFiles"));
  final int FILE_COUNT = atLeast(500);
  List<String> files = new ArrayList<>();
  SegmentInfo si = newSegmentInfo(dir, "_123");
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    String file = "_123." + fileIdx;
    files.add(file);
    try (IndexOutput out = dir.createOutput(file, newIOContext(random()))) {
      CodecUtil.writeIndexHeader(out, "Foo", 0, si.getId(), "suffix");
      out.writeByte((byte) fileIdx);
      CodecUtil.writeFooter(out);
    }
  }
  assertEquals(0, dir.getFileHandleCount());
  si.setFiles(files);
  si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
  Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
  final IndexInput[] ins = new IndexInput[FILE_COUNT];
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    ins[fileIdx] = cfs.openInput("_123." + fileIdx, newIOContext(random()));
    CodecUtil.checkIndexHeader(ins[fileIdx], "Foo", 0, 0, si.getId(), "suffix");
  }
  assertEquals(1, dir.getFileHandleCount());
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    assertEquals((byte) fileIdx, ins[fileIdx].readByte());
  }
  assertEquals(1, dir.getFileHandleCount());
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    ins[fileIdx].close();
  }
  cfs.close();
  dir.close();
}
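The descriptor assertions above lean on MockDirectoryWrapper's bookkeeping of open inputs and outputs. A tiny sketch of that mechanism in isolation (newMockDirectory() is the LuceneTestCase helper; the file name is illustrative):

// Every open IndexInput/IndexOutput counts as one file handle.
MockDirectoryWrapper probe = newMockDirectory();
IndexOutput out = probe.createOutput("probe.bin", IOContext.DEFAULT);
assertEquals(1, probe.getFileHandleCount());
out.close();
assertEquals(0, probe.getFileHandleCount());
probe.close();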