Use of org.apache.lucene.util.LineFileDocs in project elasticsearch by elastic.
Class TranslogTests, method testTranslogOpsCountIsCorrect:
public void testTranslogOpsCountIsCorrect() throws IOException {
    List<Translog.Location> locations = new ArrayList<>();
    int numOps = randomIntBetween(100, 200);
    // writes pretty big docs so we cross buffer borders regularly
    LineFileDocs lineFileDocs = new LineFileDocs(random());
    for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
        locations.add(translog.add(new Translog.Index("test", "" + opsAdded, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))));
        Translog.Snapshot snapshot = this.translog.newSnapshot();
        assertEquals(opsAdded + 1, snapshot.totalOperations());
        for (int i = 0; i < opsAdded; i++) {
            assertEquals("expected operation " + i + " to be in the current translog but wasn't", translog.currentFileGeneration(), locations.get(i).generation);
            Translog.Operation next = snapshot.next();
            assertNotNull("operation " + i + " must be non-null", next);
        }
    }
}
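For context, LineFileDocs is a Lucene test-framework utility that serves pre-built documents (with fields such as "title", "body", and "docid") from a bundled line file, so tests get realistic, variably sized text without shipping their own corpus. A minimal, self-contained sketch of the pattern the snippets on this page rely on; the seed, loop bound, and method name are illustrative, the usual Lucene test-framework imports are assumed, and the component that consumes the bytes is elided:

// Sketch only (not from the project): iterate LineFileDocs and hand each body
// to whatever component is under test (translog, compressor, index writer, ...).
static void feedLineFileDocs() throws IOException {
    LineFileDocs docs = new LineFileDocs(new Random(42));   // seedable for reproducibility
    for (int i = 0; i < 3; i++) {
        Document doc = docs.nextDoc();                       // next pre-built document
        byte[] body = doc.get("body").getBytes(StandardCharsets.UTF_8);
        // ... hand 'body' to the component under test ...
    }
    docs.close();                                            // releases the underlying line file
}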
Use of org.apache.lucene.util.LineFileDocs in project elasticsearch by elastic.
Class DeflateCompressTests, method testMixed:
public void testMixed() throws IOException {
    Random r = random();
    LineFileDocs lineFileDocs = new LineFileDocs(r);
    for (int i = 0; i < 2; ++i) {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        int prevInt = r.nextInt();
        long prevLong = r.nextLong();
        while (bos.size() < 400000) {
            switch (r.nextInt(4)) {
                case 0:
                    addInt(r, prevInt, bos);
                    break;
                case 1:
                    addLong(r, prevLong, bos);
                    break;
                case 2:
                    addString(lineFileDocs, bos);
                    break;
                case 3:
                    addBytes(r, bos);
                    break;
                default:
                    throw new IllegalStateException("Random is broken");
            }
        }
        doTest(bos.toByteArray());
    }
}
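The helpers addInt, addLong, addString, addBytes, and doTest are defined elsewhere in DeflateCompressTests and are not shown here. A rough sketch of two of them, under the assumption that addString appends a line-file body and doTest round-trips the buffer through the compressor; the JDK Deflater/Inflater streams below merely stand in for the Elasticsearch deflate compressor actually under test:

// Hypothetical reconstructions; the real helpers live elsewhere in the test class.
static void addString(LineFileDocs lineFileDocs, ByteArrayOutputStream bos) throws IOException {
    String s = lineFileDocs.nextDoc().get("body");
    bos.write(s.getBytes(StandardCharsets.UTF_8));
}

static void doTest(byte[] expected) throws IOException {
    // Compress, decompress, and verify the round trip byte-for-byte.
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (DeflaterOutputStream out = new DeflaterOutputStream(compressed)) {
        out.write(expected);
    }
    try (InflaterInputStream in = new InflaterInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
        assertArrayEquals(expected, in.readAllBytes());
    }
}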
Use of org.apache.lucene.util.LineFileDocs in project elasticsearch by elastic.
Class DeflateCompressTests, method testLineDocs:
public void testLineDocs() throws IOException {
    Random r = random();
    LineFileDocs lineFileDocs = new LineFileDocs(r);
    for (int i = 0; i < 10; i++) {
        int numDocs = TestUtil.nextInt(r, 1, 200);
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        for (int j = 0; j < numDocs; j++) {
            String s = lineFileDocs.nextDoc().get("body");
            bos.write(s.getBytes(StandardCharsets.UTF_8));
        }
        doTest(bos.toByteArray());
    }
    lineFileDocs.close();
}
Use of org.apache.lucene.util.LineFileDocs in project elasticsearch by elastic.
Class DeflateCompressTests, method testLineDocsThreads:
public void testLineDocsThreads() throws Exception {
    final Random r = random();
    int threadCount = TestUtil.nextInt(r, 2, 6);
    Thread[] threads = new Thread[threadCount];
    final CountDownLatch startingGun = new CountDownLatch(1);
    for (int tid = 0; tid < threadCount; tid++) {
        final long seed = r.nextLong();
        threads[tid] = new Thread() {
            @Override
            public void run() {
                try {
                    Random r = new Random(seed);
                    startingGun.await();
                    LineFileDocs lineFileDocs = new LineFileDocs(r);
                    for (int i = 0; i < 10; i++) {
                        int numDocs = TestUtil.nextInt(r, 1, 200);
                        ByteArrayOutputStream bos = new ByteArrayOutputStream();
                        for (int j = 0; j < numDocs; j++) {
                            String s = lineFileDocs.nextDoc().get("body");
                            bos.write(s.getBytes(StandardCharsets.UTF_8));
                        }
                        doTest(bos.toByteArray());
                    }
                    lineFileDocs.close();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        };
        threads[tid].start();
    }
    startingGun.countDown();
    for (Thread t : threads) {
        t.join();
    }
}
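One thing to note about the threaded variant: the RuntimeException rethrown inside run() does not by itself fail the joining thread. Inside the Lucene test framework an uncaught-exception handler typically fails the run anyway, but outside it a common alternative is to collect failures explicitly and rethrow after join(). A minimal sketch of that pattern, with the per-thread work elided:

// Sketch of explicit failure collection, assuming the same per-thread work as above.
final AtomicReference<Throwable> failure = new AtomicReference<>();
Thread worker = new Thread(() -> {
    try {
        // ... per-thread LineFileDocs / doTest work ...
    } catch (Throwable t) {
        failure.compareAndSet(null, t);   // remember the first failure only
    }
});
worker.start();
worker.join();
if (failure.get() != null) {
    throw new AssertionError("worker thread failed", failure.get());
}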
Use of org.apache.lucene.util.LineFileDocs in project lucene-solr by apache.
Class TestNRTCachingDirectory, method testNRTAndCommit:
public void testNRTAndCommit() throws Exception {
    Directory dir = newDirectory();
    NRTCachingDirectory cachedDir = new NRTCachingDirectory(dir, 2.0, 25.0);
    MockAnalyzer analyzer = new MockAnalyzer(random());
    analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
    IndexWriterConfig conf = newIndexWriterConfig(analyzer);
    RandomIndexWriter w = new RandomIndexWriter(random(), cachedDir, conf);
    final LineFileDocs docs = new LineFileDocs(random());
    final int numDocs = TestUtil.nextInt(random(), 100, 400);
    if (VERBOSE) {
        System.out.println("TEST: numDocs=" + numDocs);
    }
    final List<BytesRef> ids = new ArrayList<>();
    DirectoryReader r = null;
    for (int docCount = 0; docCount < numDocs; docCount++) {
        final Document doc = docs.nextDoc();
        ids.add(new BytesRef(doc.get("docid")));
        w.addDocument(doc);
        if (random().nextInt(20) == 17) {
            if (r == null) {
                r = DirectoryReader.open(w.w);
            } else {
                final DirectoryReader r2 = DirectoryReader.openIfChanged(r);
                if (r2 != null) {
                    r.close();
                    r = r2;
                }
            }
            assertEquals(1 + docCount, r.numDocs());
            final IndexSearcher s = newSearcher(r);
            // Just make sure search can run; we can't assert
            // totHits since it could be 0
            TopDocs hits = s.search(new TermQuery(new Term("body", "the")), 10);
            // System.out.println("tot hits " + hits.totalHits);
        }
    }
    if (r != null) {
        r.close();
    }
    // Close should force cache to clear since all files are sync'd
    w.close();
    final String[] cachedFiles = cachedDir.listCachedFiles();
    for (String file : cachedFiles) {
        System.out.println("FAIL: cached file " + file + " remains after sync");
    }
    assertEquals(0, cachedFiles.length);
    r = DirectoryReader.open(dir);
    for (BytesRef id : ids) {
        assertEquals(1, r.docFreq(new Term("docid", id)));
    }
    r.close();
    cachedDir.close();
    docs.close();
}
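For reference, the two doubles passed to NRTCachingDirectory are maxMergeSizeMB and maxCachedMB: newly flushed files expected to be smaller than the first value are kept in RAM, up to the second value in total, and everything ends up in the delegate directory once it is committed and closed. A minimal stand-alone sketch of that behavior; the path, analyzer, field name, and method name are illustrative and not taken from the test:

// Minimal sketch: wrap a delegate directory, index one document, and confirm that
// nothing stays cached in RAM once the writer is closed.
static void nrtCachingDemo() throws IOException {
    Directory delegate = FSDirectory.open(Paths.get("/tmp/nrt-demo"));          // illustrative path
    NRTCachingDirectory nrtDir = new NRTCachingDirectory(delegate, 2.0, 25.0);  // maxMergeSizeMB, maxCachedMB
    IndexWriter writer = new IndexWriter(nrtDir, new IndexWriterConfig(new StandardAnalyzer()));
    Document doc = new Document();
    doc.add(new TextField("body", "hello nrt caching directory", Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();                                   // flushed files are synced to the delegate
    writer.close();
    assertEquals(0, nrtDir.listCachedFiles().length);  // cache drained after close
    nrtDir.close();
}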