Use of org.apache.geode.cache.lucene.test.TestObject in project geode by apache.
From the class LuceneIndexDestroyDUnitTest, the method doPutsUntilStopped:
private void doPutsUntilStopped() throws Exception {
  allowPuts();
  Region region = getCache().getRegion(REGION_NAME);
  int i = 0;
  while (!STOP_PUTS) {
    region.put(i++, new TestObject());
    NUM_PUTS_COMPLETED = i;
  }
}
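allowPuts() and the STOP_PUTS / NUM_PUTS_COMPLETED fields are defined elsewhere in the test class. A minimal sketch of how that cross-thread coordination is typically wired up (the field and method bodies below are assumptions, not the project's actual code):

// Hypothetical sketch: volatile flags so the put loop above can be started
// and stopped from another thread.
private static volatile boolean STOP_PUTS = false;
private static volatile int NUM_PUTS_COMPLETED = 0;

private static void allowPuts() {
  STOP_PUTS = false;
}

private static void stopPuts() {
  STOP_PUTS = true;
}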
Use of org.apache.geode.cache.lucene.test.TestObject in project geode by apache.
From the class DumpDirectoryFilesIntegrationTest, the method shouldDumpReadableLuceneIndexFile:
@Test
public void shouldDumpReadableLuceneIndexFile() throws Exception {
  luceneService.createIndexFactory().setFields("title", "description").create(INDEX_NAME, REGION_NAME);
  Region region = createRegion(REGION_NAME, RegionShortcut.PARTITION);
  region.put(0, new TestObject("title 1", "hello world"));
  region.put(1 * 113, new TestObject("title 2", "this will not match"));
  region.put(2 * 113, new TestObject("title 3", "hello world"));
  region.put(3 * 113, new TestObject("hello world", "hello world"));
  InternalLuceneIndex index = (InternalLuceneIndex) luceneService.getIndex(INDEX_NAME, REGION_NAME);
  luceneService.waitUntilFlushed(INDEX_NAME, REGION_NAME, 60000, TimeUnit.MILLISECONDS);
  index.dumpFiles(diskDirRule.get().getAbsolutePath());
  // Find the directory for the first bucket
  File bucket0 = diskDirRule.get().listFiles(file -> file.getName().endsWith("_0"))[0];
  // Test that we can read the lucene index from the dump
  final FSDirectory directory = FSDirectory.open(bucket0.toPath());
  IndexReader reader = DirectoryReader.open(directory);
  IndexSearcher searcher = new IndexSearcher(reader);
  final TopDocs results = searcher.search(new MatchAllDocsQuery(), 1000);
  assertEquals(4, results.totalHits);
}
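The luceneService field used by these tests comes from the cache's LuceneService. A minimal sketch of that setup, assuming a single-member test cache (the class and variable names here are illustrative):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.lucene.LuceneService;
import org.apache.geode.cache.lucene.LuceneServiceProvider;

public class LuceneTestSetupSketch {
  private Cache cache;
  private LuceneService luceneService;

  public void setUp() {
    // Create a standalone peer cache and look up the LuceneService bound to it.
    cache = new CacheFactory().set("mcast-port", "0").create();
    luceneService = LuceneServiceProvider.get(cache);
  }
}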
Use of org.apache.geode.cache.lucene.test.TestObject in project geode by apache.
From the class LuceneQueriesIntegrationTest, the method shouldReturnCorrectResultsOnDeletionAfterQueryExecutionWithLoader:
@Test
public void shouldReturnCorrectResultsOnDeletionAfterQueryExecutionWithLoader() throws Exception {
  final int pageSize = 2;
  final LuceneQuery<Object, Object> query = addValuesAndCreateQuery(pageSize);
  region.getAttributesMutator().setCacheLoader(new CacheLoader() {
    @Override
    public Object load(final LoaderHelper helper) throws CacheLoaderException {
      return new TestObject("should not", "load this");
    }

    @Override
    public void close() {}
  });
  final PageableLuceneQueryResults<Object, Object> pages = query.findPages();
  List<LuceneResultStruct<Object, Object>> allEntries = new ArrayList<>();
  assertTrue(pages.hasNext());
  assertEquals(7, pages.size());
  // Destroying an entry from the region after the query is executed.
  region.destroy("C");
  final List<LuceneResultStruct<Object, Object>> page1 = pages.next();
  assertEquals(pageSize, page1.size());
  final List<LuceneResultStruct<Object, Object>> page2 = pages.next();
  assertEquals(pageSize, page2.size());
  final List<LuceneResultStruct<Object, Object>> page3 = pages.next();
  assertEquals(pageSize, page3.size());
  assertFalse(pages.hasNext());
  allEntries.addAll(page1);
  allEntries.addAll(page2);
  allEntries.addAll(page3);
  assertEquals(region.keySet(), allEntries.stream().map(entry -> entry.getKey()).collect(Collectors.toSet()));
  assertEquals(region.values(), allEntries.stream().map(entry -> entry.getValue()).collect(Collectors.toSet()));
}
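addValuesAndCreateQuery(pageSize) is a helper defined elsewhere in the test class. Judging from the assertions above (seven hits returned in pages of two), it presumably creates the index, puts several matching entries, and builds a paged query. A hypothetical sketch; the actual field names, keys, and query string in the Geode test may differ:

private LuceneQuery<Object, Object> addValuesAndCreateQuery(int pageSize) throws Exception {
  luceneService.createIndexFactory().setFields("field1", "field2").create(INDEX_NAME, REGION_NAME);
  region = cache.createRegionFactory(RegionShortcut.PARTITION).create(REGION_NAME);
  // Seven entries that all match the query, so pages.size() reports 7.
  for (String key : new String[] {"A", "B", "C", "D", "E", "F", "G"}) {
    region.put(key, new TestObject("hello world", key));
  }
  luceneService.waitUntilFlushed(INDEX_NAME, REGION_NAME, 60000, TimeUnit.MILLISECONDS);
  return luceneService.createLuceneQueryFactory().setPageSize(pageSize)
      .create(INDEX_NAME, REGION_NAME, "field1:world", "field1");
}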
Use of org.apache.geode.cache.lucene.test.TestObject in project geode by apache.
From the class LuceneQueriesIntegrationTest, the method shouldTokenizeUsingMyCharacterAnalyser:
@Test
public void shouldTokenizeUsingMyCharacterAnalyser() throws Exception {
  Map<String, Analyzer> fields = new HashMap<String, Analyzer>();
  // Do not specify an analyzer for field1; the standard analyzer will be used.
  // Note: the map must contain "field1", otherwise field1 will not be tokenized.
  fields.put("field1", null);
  fields.put("field2", new MyCharacterAnalyzer());
  luceneService.createIndexFactory().setFields(fields).create(INDEX_NAME, REGION_NAME);
  Region region = cache.createRegionFactory(RegionShortcut.PARTITION).create(REGION_NAME);
  final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
  // Put entries whose values share some of the same tokens.
  String value1 = "one three";
  String value4 = "two_four";
  String value3 = "two@four";
  region.put("A", new TestObject(value1, value4));
  region.put("B", new TestObject(value1, value3));
  region.put("C", new TestObject(value3, value3));
  region.put("D", new TestObject(value4, value4));
  luceneService.waitUntilFlushed(INDEX_NAME, REGION_NAME, 60000, TimeUnit.MILLISECONDS);
  verifyQuery("field1:one AND field2:two_four", DEFAULT_FIELD, "A");
  verifyQuery("field1:one AND field2:two", DEFAULT_FIELD, "A");
  verifyQuery("field1:three AND field2:four", DEFAULT_FIELD, "A");
}
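MyCharacterAnalyzer is also defined in the test class; from the queries above it evidently splits tokens on '_' (so "two_four" is indexed as "two" and "four"), which the standard analyzer does not do. A sketch of such an analyzer, assuming the Lucene 6-era API these tests compile against (not the project's actual implementation):

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.util.CharTokenizer;

public class MyCharacterAnalyzer extends Analyzer {
  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    // Treat every character except '_' as part of a token, then lowercase.
    Tokenizer tokenizer = new CharTokenizer() {
      @Override
      protected boolean isTokenChar(int character) {
        return character != '_';
      }
    };
    TokenStream filter = new LowerCaseFilter(tokenizer);
    return new TokenStreamComponents(tokenizer, filter);
  }
}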
Use of org.apache.geode.cache.lucene.test.TestObject in project geode by apache.
From the class LuceneQueriesIntegrationTest, the method shouldNotTokenizeWordsWithKeywordAnalyzer:
@Test
public void shouldNotTokenizeWordsWithKeywordAnalyzer() throws Exception {
  Map<String, Analyzer> fields = new HashMap<String, Analyzer>();
  fields.put("field1", new StandardAnalyzer());
  fields.put("field2", new KeywordAnalyzer());
  luceneService.createIndexFactory().setFields(fields).create(INDEX_NAME, REGION_NAME);
  Region region = cache.createRegionFactory(RegionShortcut.PARTITION).create(REGION_NAME);
  final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
  // Put three values that share some of the same tokens.
  String value1 = "one three";
  String value2 = "one two three";
  String value3 = "one@three";
  region.put("A", new TestObject(value1, value1));
  region.put("B", new TestObject(value2, value2));
  region.put("C", new TestObject(value3, value3));
  // The values will be indexed into the following documents by the two analyzers:
  // <field1:one three> <field2:one three>
  // <field1:one two three> <field2:one two three>
  // <field1:one@three> <field2:one@three>
  luceneService.waitUntilFlushed(INDEX_NAME, REGION_NAME, 60000, TimeUnit.MILLISECONDS);
  // Standard analyzer with double quotes: the query string is parsed as the phrase "one three",
  // and the standard analyzer also indexes the value "one@three" as the tokens "one" "three",
  // so the query is field1:"one three" and C is hit as well as A.
  verifyQuery("field1:\"one three\"", DEFAULT_FIELD, "A", "C");
  // The standard analyzer does not tokenize on '_', so the query string stays "one_three";
  // the query is field1:one_three, which matches nothing.
  verifyQuery("field1:one_three", DEFAULT_FIELD);
  // The standard analyzer does tokenize on '@', so the query string is parsed as "one" "three";
  // the query becomes field1:one field1:three.
  verifyQuery("field1:one@three", DEFAULT_FIELD, "A", "B", "C");
  HashMap expectedResults = new HashMap();
  expectedResults.put("A", new TestObject(value1, value1));
  expectedResults.put("B", new TestObject(value2, value2));
  expectedResults.put("C", new TestObject(value3, value3));
  verifyQuery("field1:one@three", DEFAULT_FIELD, expectedResults);
  // Keyword analyzer: the query only matches the entry whose field2 value exactly equals the
  // query string. The quoted query string is kept as the single token "one three",
  // so the query is field2:"one three" and only A matches.
  verifyQuery("field2:\"one three\"", DEFAULT_FIELD, "A");
  // Keyword analyzer without double quotes behaves the same as with them:
  // the query is field2:one@three, which exactly matches C's value.
  verifyQuery("field2:one@three", DEFAULT_FIELD, "C");
}
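verifyQuery is another helper from the test class: it runs the query against the index and checks the hits, either as a set of expected keys or, in the overload that takes a map, as expected key/value pairs. A hypothetical sketch of the key-checking overload (the real helper may differ):

private void verifyQuery(String queryString, String defaultField, String... expectedKeys)
    throws Exception {
  LuceneQuery<String, TestObject> query = luceneService.createLuceneQueryFactory()
      .create(INDEX_NAME, REGION_NAME, queryString, defaultField);
  // findKeys() returns the keys of all matching entries; compare them as sets.
  Set<String> actualKeys = new HashSet<>(query.findKeys());
  assertEquals(new HashSet<>(Arrays.asList(expectedKeys)), actualKeys);
}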