Use of org.apache.lucene.store.BaseDirectoryWrapper in project lucene-solr by apache.
Class Test2BBinaryDocValues, method testFixedBinary.
// indexes IndexWriter.MAX_DOCS docs with a fixed binary field
public void testFixedBinary() throws Exception {
  BaseDirectoryWrapper dir = newFSDirectory(createTempDir("2BFixedBinary"));
  if (dir instanceof MockDirectoryWrapper) {
    ((MockDirectoryWrapper) dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
  }
  IndexWriter w = new IndexWriter(dir,
      new IndexWriterConfig(new MockAnalyzer(random()))
          .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
          .setRAMBufferSizeMB(256.0)
          .setMergeScheduler(new ConcurrentMergeScheduler())
          .setMergePolicy(newLogMergePolicy(false, 10))
          .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
          .setCodec(TestUtil.getDefaultCodec()));
  Document doc = new Document();
  byte[] bytes = new byte[4];
  // data wraps bytes without copying, so mutating the array below also changes
  // the value the doc-values field indexes and, later, the expected value used
  // during verification
  BytesRef data = new BytesRef(bytes);
  BinaryDocValuesField dvField = new BinaryDocValuesField("dv", data);
  doc.add(dvField);
  for (int i = 0; i < IndexWriter.MAX_DOCS; i++) {
    bytes[0] = (byte) (i >> 24);
    bytes[1] = (byte) (i >> 16);
    bytes[2] = (byte) (i >> 8);
    bytes[3] = (byte) i;
    w.addDocument(doc);
    if (i % 100000 == 0) {
      System.out.println("indexed: " + i);
      System.out.flush();
    }
  }
  w.forceMerge(1);
  w.close();
  System.out.println("verifying...");
  System.out.flush();
  DirectoryReader r = DirectoryReader.open(dir);
  int expectedValue = 0;
  for (LeafReaderContext context : r.leaves()) {
    LeafReader reader = context.reader();
    BinaryDocValues dv = reader.getBinaryDocValues("dv");
    for (int i = 0; i < reader.maxDoc(); i++) {
      bytes[0] = (byte) (expectedValue >> 24);
      bytes[1] = (byte) (expectedValue >> 16);
      bytes[2] = (byte) (expectedValue >> 8);
      bytes[3] = (byte) expectedValue;
      assertEquals(i, dv.nextDoc());
      final BytesRef term = dv.binaryValue();
      assertEquals(data, term);
      expectedValue++;
    }
  }
  r.close();
  dir.close();
}
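The key trick in this test is that data wraps the bytes array rather than copying it, so the same BytesRef serves both as the indexed value and as the per-document expected value during verification. A minimal sketch of that aliasing behaviour (the class name is made up for illustration):

import org.apache.lucene.util.BytesRef;

public class BytesRefAliasingSketch {
  public static void main(String[] args) {
    byte[] bytes = new byte[4];
    BytesRef data = new BytesRef(bytes); // wraps the array; no copy is made

    bytes[3] = 42; // mutate the backing array
    // the BytesRef observes the change because it points at the same array
    System.out.println(data.bytes[data.offset + 3]); // prints 42
  }
}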
Use of org.apache.lucene.store.BaseDirectoryWrapper in project lucene-solr by apache.
Class TestIndexWriterWithThreads, method testRollbackAndCommitWithThreads.
// LUCENE-4147
public void testRollbackAndCommitWithThreads() throws Exception {
  final BaseDirectoryWrapper d = newDirectory();
  final int threadCount = TestUtil.nextInt(random(), 2, 6);
  final AtomicReference<IndexWriter> writerRef = new AtomicReference<>();
  MockAnalyzer analyzer = new MockAnalyzer(random());
  analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
  writerRef.set(new IndexWriter(d, newIndexWriterConfig(analyzer)));
  // Make an initial commit so the test doesn't trip "corrupt first commit" when
  // the virus checker refuses to delete a partial segments_N file:
  writerRef.get().commit();
  final LineFileDocs docs = new LineFileDocs(random());
  final Thread[] threads = new Thread[threadCount];
  final int iters = atLeast(100);
  final AtomicBoolean failed = new AtomicBoolean();
  final Lock rollbackLock = new ReentrantLock();
  final Lock commitLock = new ReentrantLock();
  for (int threadID = 0; threadID < threadCount; threadID++) {
    threads[threadID] = new Thread() {
      @Override
      public void run() {
        for (int iter = 0; iter < iters && !failed.get(); iter++) {
          //final int x = random().nextInt(5);
          final int x = random().nextInt(3);
          try {
            switch (x) {
              case 0:
                rollbackLock.lock();
                if (VERBOSE) {
                  System.out.println("\nTEST: " + Thread.currentThread().getName() + ": now rollback");
                }
                try {
                  writerRef.get().rollback();
                  if (VERBOSE) {
                    System.out.println("TEST: " + Thread.currentThread().getName() + ": rollback done; now open new writer");
                  }
                  writerRef.set(new IndexWriter(d, newIndexWriterConfig(new MockAnalyzer(random()))));
                } finally {
                  rollbackLock.unlock();
                }
                break;
              case 1:
                commitLock.lock();
                if (VERBOSE) {
                  System.out.println("\nTEST: " + Thread.currentThread().getName() + ": now commit");
                }
                try {
                  if (random().nextBoolean()) {
                    writerRef.get().prepareCommit();
                  }
                  writerRef.get().commit();
                } catch (AlreadyClosedException | NullPointerException ace) {
                  // ok
                } finally {
                  commitLock.unlock();
                }
                break;
              case 2:
                if (VERBOSE) {
                  System.out.println("\nTEST: " + Thread.currentThread().getName() + ": now add");
                }
                try {
                  writerRef.get().addDocument(docs.nextDoc());
                } catch (AlreadyClosedException | NullPointerException | AssertionError ace) {
                  // ok
                }
                break;
            }
          } catch (Throwable t) {
            failed.set(true);
            throw new RuntimeException(t);
          }
        }
      }
    };
    threads[threadID].start();
  }
  for (int threadID = 0; threadID < threadCount; threadID++) {
    threads[threadID].join();
  }
  assertTrue(!failed.get());
  writerRef.get().close();
  d.close();
}
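The core pattern here is an AtomicReference<IndexWriter> that one thread may close via rollback() and replace with a fresh writer, while other threads keep using whatever instance they last fetched and treat AlreadyClosedException as an expected race. A minimal single-threaded sketch of that pattern, with assumed names and a plain FSDirectory in place of the test framework's newDirectory():

import java.nio.file.Files;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class WriterSwapSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(Files.createTempDirectory("writer-swap"));
    AtomicReference<IndexWriter> writerRef =
        new AtomicReference<>(new IndexWriter(dir, new IndexWriterConfig()));

    // What the rollback branch does: close the current writer, then install a
    // fresh one under the same reference.
    writerRef.get().rollback();
    writerRef.set(new IndexWriter(dir, new IndexWriterConfig()));

    // What the add/commit branches do: re-read the reference each time and treat
    // AlreadyClosedException as a benign race with a concurrent rollback.
    try {
      writerRef.get().addDocument(new Document());
      writerRef.get().commit();
    } catch (AlreadyClosedException ace) {
      // expected if another thread rolled back between get() and this call
    }

    writerRef.get().close();
    dir.close();
  }
}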
Use of org.apache.lucene.store.BaseDirectoryWrapper in project lucene-solr by apache.
Class TestPagedBytes, method testDataInputOutput.
// Writes random bytes to a "normal" file in dir, then
// copies them into PagedBytes and verifies with
// PagedBytes.Reader:
public void testDataInputOutput() throws Exception {
  Random random = random();
  for (int iter = 0; iter < 5 * RANDOM_MULTIPLIER; iter++) {
    BaseDirectoryWrapper dir = newFSDirectory(createTempDir("testOverflow"));
    if (dir instanceof MockDirectoryWrapper) {
      ((MockDirectoryWrapper) dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
    }
    final int blockBits = TestUtil.nextInt(random, 1, 20);
    final int blockSize = 1 << blockBits;
    final PagedBytes p = new PagedBytes(blockBits);
    final IndexOutput out = dir.createOutput("foo", IOContext.DEFAULT);
    final int numBytes = TestUtil.nextInt(random(), 2, 10000000);
    final byte[] answer = new byte[numBytes];
    random().nextBytes(answer);
    int written = 0;
    while (written < numBytes) {
      if (random().nextInt(10) == 7) {
        out.writeByte(answer[written++]);
      } else {
        int chunk = Math.min(random().nextInt(1000), numBytes - written);
        out.writeBytes(answer, written, chunk);
        written += chunk;
      }
    }
    out.close();
    final IndexInput input = dir.openInput("foo", IOContext.DEFAULT);
    final DataInput in = input.clone();
    p.copy(input, input.length());
    final PagedBytes.Reader reader = p.freeze(random.nextBoolean());
    final byte[] verify = new byte[numBytes];
    int read = 0;
    while (read < numBytes) {
      if (random().nextInt(10) == 7) {
        verify[read++] = in.readByte();
      } else {
        int chunk = Math.min(random().nextInt(1000), numBytes - read);
        in.readBytes(verify, read, chunk);
        read += chunk;
      }
    }
    assertTrue(Arrays.equals(answer, verify));
    final BytesRef slice = new BytesRef();
    for (int iter2 = 0; iter2 < 100; iter2++) {
      final int pos = random.nextInt(numBytes - 1);
      final int len = random.nextInt(Math.min(blockSize + 1, numBytes - pos));
      reader.fillSlice(slice, pos, len);
      for (int byteUpto = 0; byteUpto < len; byteUpto++) {
        assertEquals(answer[pos + byteUpto], slice.bytes[slice.offset + byteUpto]);
      }
    }
    input.close();
    dir.close();
  }
}
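Condensed, the PagedBytes workflow the test exercises is: allocate with a block size of 1 << blockBits, copy() bytes in from an IndexInput, freeze() to obtain a Reader, then fillSlice() to read back arbitrary ranges of at most one block length each. A minimal sketch with fixed data instead of random input; the file name and class name are made up for illustration:

import java.nio.file.Files;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.PagedBytes;

public class PagedBytesSketch {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Files.createTempDirectory("pagedbytes"))) {
      // Write a small file.
      byte[] answer = new byte[] {1, 2, 3, 4, 5, 6, 7, 8};
      try (IndexOutput out = dir.createOutput("blob", IOContext.DEFAULT)) {
        out.writeBytes(answer, 0, answer.length);
      }

      // Copy the file into PagedBytes using 4-byte (1 << 2) blocks, freeze it,
      // then pull an arbitrary slice back out.
      PagedBytes p = new PagedBytes(2);
      try (IndexInput in = dir.openInput("blob", IOContext.DEFAULT)) {
        p.copy(in, in.length());
      }
      PagedBytes.Reader reader = p.freeze(true);

      BytesRef slice = new BytesRef();
      reader.fillSlice(slice, 3, 4); // bytes 3..6 of the original data
      for (int i = 0; i < slice.length; i++) {
        System.out.println(slice.bytes[slice.offset + i]); // prints 4, 5, 6, 7
      }
    }
  }
}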
Use of org.apache.lucene.store.BaseDirectoryWrapper in project lucene-solr by apache.
Class LuceneTestCase, method newFSDirectory.
private static BaseDirectoryWrapper newFSDirectory(Path f, LockFactory lf, boolean bare) {
  String fsdirClass = TEST_DIRECTORY;
  if (fsdirClass.equals("random")) {
    fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);
  }
  Class<? extends FSDirectory> clazz;
  try {
    try {
      clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass);
    } catch (ClassCastException e) {
      // TEST_DIRECTORY is not a sub-class of FSDirectory, so draw one at random
      fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);
      clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass);
    }
    Directory fsdir = newFSDirectoryImpl(clazz, f, lf);
    BaseDirectoryWrapper wrapped = wrapDirectory(random(), fsdir, bare);
    return wrapped;
  } catch (Exception e) {
    Rethrow.rethrow(e);
    // dummy to prevent compiler failure
    throw null;
  }
}
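For callers, the net effect is that newFSDirectory returns the chosen FSDirectory implementation wrapped in a BaseDirectoryWrapper (often a MockDirectoryWrapper), which can run extra checks such as CheckIndex when closed. A minimal sketch of the typical consumption pattern, mirroring the tests above (the test class name is hypothetical):

import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase;

public class MyDirectoryTest extends LuceneTestCase {
  public void testUsesWrappedDirectory() throws Exception {
    BaseDirectoryWrapper dir = newFSDirectory(createTempDir("my-dir-test"));
    // The wrapper type depends on the random seed, so tests that tune mock
    // behaviour check the runtime type first:
    if (dir instanceof MockDirectoryWrapper) {
      ((MockDirectoryWrapper) dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
    }
    // ... exercise the directory ...
    dir.close(); // may run CheckIndex, depending on setCheckIndexOnClose
  }
}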
Use of org.apache.lucene.store.BaseDirectoryWrapper in project lucene-solr by apache.
Class TestExternalCodecs, method testPerFieldCodec.
// tests a custom per-field codec: "field2" (and "id") are stored with the
// memory postings format, while "field1" uses the default codec
public void testPerFieldCodec() throws Exception {
  final int NUM_DOCS = atLeast(173);
  if (VERBOSE) {
    System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
  }
  BaseDirectoryWrapper dir = newDirectory();
  // we use a custom codec provider
  dir.setCheckIndexOnClose(false);
  IndexWriter w = new IndexWriter(dir,
      newIndexWriterConfig(new MockAnalyzer(random()))
          .setCodec(new CustomPerFieldCodec())
          .setMergePolicy(newLogMergePolicy(3)));
  Document doc = new Document();
  // uses default codec:
  doc.add(newTextField("field1", "this field uses the standard codec as the test", Field.Store.NO));
  // uses memory codec:
  Field field2 = newTextField("field2", "this field uses the memory codec as the test", Field.Store.NO);
  doc.add(field2);
  Field idField = newStringField("id", "", Field.Store.NO);
  doc.add(idField);
  for (int i = 0; i < NUM_DOCS; i++) {
    idField.setStringValue("" + i);
    w.addDocument(doc);
    if ((i + 1) % 10 == 0) {
      w.commit();
    }
  }
  if (VERBOSE) {
    System.out.println("TEST: now delete id=77");
  }
  w.deleteDocuments(new Term("id", "77"));
  IndexReader r = DirectoryReader.open(w);
  assertEquals(NUM_DOCS - 1, r.numDocs());
  IndexSearcher s = newSearcher(r);
  assertEquals(NUM_DOCS - 1, s.search(new TermQuery(new Term("field1", "standard")), 1).totalHits);
  assertEquals(NUM_DOCS - 1, s.search(new TermQuery(new Term("field2", "memory")), 1).totalHits);
  r.close();
  if (VERBOSE) {
    System.out.println("\nTEST: now delete 2nd doc");
  }
  w.deleteDocuments(new Term("id", "44"));
  if (VERBOSE) {
    System.out.println("\nTEST: now force merge");
  }
  w.forceMerge(1);
  if (VERBOSE) {
    System.out.println("\nTEST: now open reader");
  }
  r = DirectoryReader.open(w);
  assertEquals(NUM_DOCS - 2, r.maxDoc());
  assertEquals(NUM_DOCS - 2, r.numDocs());
  s = newSearcher(r);
  assertEquals(NUM_DOCS - 2, s.search(new TermQuery(new Term("field1", "standard")), 1).totalHits);
  assertEquals(NUM_DOCS - 2, s.search(new TermQuery(new Term("field2", "memory")), 1).totalHits);
  assertEquals(1, s.search(new TermQuery(new Term("id", "76")), 1).totalHits);
  assertEquals(0, s.search(new TermQuery(new Term("id", "77")), 1).totalHits);
  assertEquals(0, s.search(new TermQuery(new Term("id", "44")), 1).totalHits);
  if (VERBOSE) {
    System.out.println("\nTEST: now close NRT reader");
  }
  r.close();
  w.close();
  dir.close();
}
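CustomPerFieldCodec itself is not shown in this snippet. As a rough illustration only, and not the test's actual class, a per-field codec can be built by extending FilterCodec and returning a PerFieldPostingsFormat that routes fields by name; the format names below assume a Lucene 7.x setup with the lucene-codecs module (which provides the "Memory" format) on the classpath:

import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.FilterCodec;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;

// Hypothetical per-field codec: "field2" and "id" go to the "Memory" postings
// format, everything else to the default format for this Lucene version.
public class MyPerFieldCodec extends FilterCodec {
  private final PostingsFormat memoryFormat = PostingsFormat.forName("Memory");
  private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene50");

  private final PostingsFormat perField = new PerFieldPostingsFormat() {
    @Override
    public PostingsFormat getPostingsFormatForField(String field) {
      if ("field2".equals(field) || "id".equals(field)) {
        return memoryFormat;
      }
      return defaultFormat;
    }
  };

  public MyPerFieldCodec() {
    super("MyPerFieldCodec", Codec.getDefault());
  }

  @Override
  public PostingsFormat postingsFormat() {
    return perField;
  }
}

If a codec like this is used to write an index that is later reopened, Lucene resolves the codec by name through SPI, so it would also need a META-INF/services registration for org.apache.lucene.codecs.Codec.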