Use of org.apache.lucene.benchmark.byTask.tasks.TaskSequence in project lucene-solr by apache.
In class LineDocSourceTest, method doIndexAndSearchTestWithRepeats:
/**
 * Indexes the given line file via a benchmark task sequence, then verifies that
 * all {@code numAdds} documents are searchable and that the expected stored
 * field round-trips.
 *
 * @param file            line-doc input file to index
 * @param lineParserClass optional line parser; {@code null} uses the default
 * @param numAdds         number of AddDocTask repetitions (and expected hits)
 * @param storedField     stored field to verify; {@code null} means the body field
 */
private void doIndexAndSearchTestWithRepeats(Path file, Class<? extends LineParser> lineParserClass, int numAdds, String storedField) throws Exception {
  IndexReader reader = null;
  IndexSearcher searcher = null;
  PerfRunData runData = null;
  try {
    // LineDocSource-specific settings plus the indexing configuration.
    Properties props = new Properties();
    props.setProperty("docs.file", file.toAbsolutePath().toString());
    if (lineParserClass != null) {
      props.setProperty("line.parser", lineParserClass.getName());
    }
    props.setProperty("analyzer", WhitespaceAnalyzer.class.getName());
    props.setProperty("content.source", LineDocSource.class.getName());
    props.setProperty("directory", "RAMDirectory");
    props.setProperty("doc.stored", "true");
    props.setProperty("doc.index.props", "true");
    runData = new PerfRunData(new Config(props));

    // Algorithm: create index, add numAdds docs, close index.
    TaskSequence tasks = new TaskSequence(runData, "testBzip2", null, false);
    tasks.addTask(new CreateIndexTask(runData));
    for (int doc = 0; doc < numAdds; doc++) {
      tasks.addTask(new AddDocTask(runData));
    }
    tasks.addTask(new CloseIndexTask(runData));
    try {
      tasks.doLogic();
    } finally {
      tasks.close();
    }

    // Every added document matches body:body; check count and stored field.
    reader = DirectoryReader.open(runData.getDirectory());
    searcher = newSearcher(reader);
    TopDocs td = searcher.search(new TermQuery(new Term("body", "body")), 10);
    assertEquals(numAdds, td.totalHits);
    assertNotNull(td.scoreDocs[0]);
    if (storedField == null) {
      // added to all docs and satisfies field-name == value
      storedField = DocMaker.BODY_FIELD;
    }
    assertEquals("Wrong field value", storedField, searcher.doc(0).get(storedField));
  } finally {
    IOUtils.close(reader, runData);
  }
}
Use of org.apache.lucene.benchmark.byTask.tasks.TaskSequence in project lucene-solr by apache.
In class DocMakerTest, method doTestIndexProperties:
/**
 * Indexes one document via OneDocSource, optionally setting the
 * {@code doc.index.props} flag, and verifies how many hits a search on the
 * properties-derived field {@code key:value} returns.
 *
 * @param setIndexProps      whether to set the "doc.index.props" property at all
 * @param indexPropsVal      the value to set when {@code setIndexProps} is true
 * @param numExpectedResults expected hit count for the key:value query
 */
private void doTestIndexProperties(boolean setIndexProps, boolean indexPropsVal, int numExpectedResults) throws Exception {
  Properties props = new Properties();
  // Indexing configuration.
  props.setProperty("analyzer", WhitespaceAnalyzer.class.getName());
  props.setProperty("content.source", OneDocSource.class.getName());
  props.setProperty("directory", "RAMDirectory");
  if (setIndexProps) {
    props.setProperty("doc.index.props", Boolean.toString(indexPropsVal));
  }
  Config config = new Config(props);
  PerfRunData runData = new PerfRunData(config);
  IndexReader reader = null;
  try {
    TaskSequence tasks = new TaskSequence(runData, getTestName(), null, false);
    tasks.addTask(new CreateIndexTask(runData));
    tasks.addTask(new AddDocTask(runData));
    tasks.addTask(new CloseIndexTask(runData));
    try {
      tasks.doLogic();
    } finally {
      // Always release the task sequence, even if doLogic() throws.
      tasks.close();
    }
    reader = DirectoryReader.open(runData.getDirectory());
    IndexSearcher searcher = newSearcher(reader);
    TopDocs td = searcher.search(new TermQuery(new Term("key", "value")), 10);
    assertEquals(numExpectedResults, td.totalHits);
  } finally {
    // Original leaked runData and leaked reader on assertion failure; close
    // both unconditionally, mirroring the sibling line-doc test.
    if (reader != null) {
      reader.close();
    }
    runData.close();
  }
}
Use of org.apache.lucene.benchmark.byTask.tasks.TaskSequence in project lucene-solr by apache.
In class Sample, method main:
/**
 * Builds a small benchmark algorithm programmatically — create an index, add
 * 500 documents, close the index, report — then prints and executes it.
 */
public static void main(String[] args) throws Exception {
  Properties p = initProps();
  Config conf = new Config(p);
  PerfRunData runData = new PerfRunData(conf);

  // Top-level sequence (not parallel) that holds the whole algorithm.
  TaskSequence top = new TaskSequence(runData, null, null, false);

  // Task that creates the index.
  top.addTask(new CreateIndexTask(runData));

  // Nested sequence repeating AddDoc 500 times. Order matters, top to bottom:
  // register the sequence with its parent first, only then add its children.
  TaskSequence seq1 = new TaskSequence(runData, "AddDocs", top, false);
  seq1.setRepetitions(500);
  seq1.setNoChildReport();
  top.addTask(seq1);

  AddDocTask addDoc = new AddDocTask(runData);
  //addDoc.setParams("1200"); // doc size limit if supported
  seq1.addTask(addDoc); // order matters (see comment above)

  // Close the index, then report a per-task-name summary.
  top.addTask(new CloseIndexTask(runData));
  top.addTask(new RepSumByNameTask(runData));

  // Print the algorithm, then execute it.
  System.out.println(top.toString());
  top.doLogic();
}
Use of org.apache.lucene.benchmark.byTask.tasks.TaskSequence in project lucene-solr by apache.
In class Algorithm, method extractTasks:
/**
 * Recursively flattens {@code seq} into {@code extrct}: the sequence itself is
 * added first, then each child task; nested sequences are expanded in turn.
 *
 * @param extrct accumulator receiving the flattened tasks
 * @param seq    the sequence to expand; a {@code null} sequence is a no-op
 */
private void extractTasks(ArrayList<PerfTask> extrct, TaskSequence seq) {
  if (seq == null)
    return;
  extrct.add(seq);
  // Bug fix: read the children of the sequence being expanded ("seq"), not the
  // instance field "sequence" — using the field made every recursive call
  // re-walk the root's children, so nested sequences were never descended into
  // (and a nested sequence caused unbounded recursion).
  ArrayList<PerfTask> t = seq.getTasks();
  if (t == null)
    return;
  for (final PerfTask p : t) {
    if (p instanceof TaskSequence) {
      extractTasks(extrct, (TaskSequence) p);
    } else {
      extrct.add(p);
    }
  }
}
Use of org.apache.lucene.benchmark.byTask.tasks.TaskSequence in project lucene-solr by apache.
In class TestPerfTasksParse, method testParseParallelTaskSequenceRepetition:
/** Tests repetition parsing for parallel task sequences ("[ ... ] : N"). */
public void testParseParallelTaskSequenceRepetition() throws Exception {
  String taskStr = "AddDoc";
  String parsedTasks = "[ " + taskStr + " ] : 1000";
  Benchmark benchmark = new Benchmark(new StringReader(propPart + parsedTasks));
  Algorithm alg = benchmark.getAlgorithm();
  ArrayList<PerfTask> algTasks = alg.extractTasks();
  boolean foundAdd = false;
  for (final PerfTask task : algTasks) {
    if (task.toString().indexOf(taskStr) >= 0) {
      foundAdd = true;
    }
    // Every parallel sequence in the parsed algorithm must carry the ": 1000"
    // repetition count and be marked parallel.
    if (task instanceof TaskSequence) {
      assertEquals("repetions should be 1000 for " + parsedTasks, 1000, ((TaskSequence) task).getRepetitions());
      assertTrue("sequence for " + parsedTasks + " should be parallel!", ((TaskSequence) task).isParallel());
    }
  }
  // Bug fix: assert AFTER the loop. Inside the loop, this failed on the first
  // iteration whenever the first task (e.g. the enclosing sequence) did not
  // mention AddDoc yet.
  assertTrue("Task " + taskStr + " was not found in " + alg.toString(), foundAdd);
}
Aggregations