Example usage of org.apache.geode.cache.lucene.internal.repository.serializer.Type1 in the Apache Geode project.
From the class LuceneQueriesPersistenceIntegrationTest, the method shouldReturnCorrectResultsWithEntriesOverflowedToDisk:
@Test
public void shouldReturnCorrectResultsWithEntriesOverflowedToDisk() throws Exception {
  // The unique index name doubles as the name prefix of the internal Lucene
  // file region (queried as "<aeqId>.files" below).
  String aeqId = LuceneServiceImpl.getUniqueIndexName(INDEX_NAME, REGION_NAME);
  LuceneService service = LuceneServiceProvider.get(cache);
  service.createIndexFactory().setFields(Type1.fields).create(INDEX_NAME, REGION_NAME);

  // Partitioned region configured to overflow to disk once it holds more than
  // one entry, so most values are evicted from memory before the query runs.
  RegionFactory<String, Type1> regionFactory =
      cache.createRegionFactory(RegionShortcut.PARTITION);
  EvictionAttributesImpl evicAttr =
      new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
  evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
  regionFactory.setEvictionAttributes(evicAttr);
  PartitionedRegion userRegion = (PartitionedRegion) regionFactory.create(REGION_NAME);

  // The original code stored getIndex(...) in an unused local; assert it
  // instead so a missing index fails the test explicitly.
  assertNotNull(service.getIndex(INDEX_NAME, REGION_NAME));
  // Nothing has been put yet, so nothing can have overflowed.
  Assert.assertEquals(0, userRegion.getDiskRegionStats().getNumOverflowOnDisk());

  userRegion.put("value1", new Type1("hello world", 1, 2L, 3.0, 4.0f));
  userRegion.put("value2", new Type1("test world", 1, 2L, 3.0, 4.0f));
  userRegion.put("value3", new Type1("lucene world", 1, 2L, 3.0, 4.0f));
  // Block (up to 60 s) until the async event queue has drained into the index.
  service.waitUntilFlushed(INDEX_NAME, REGION_NAME, 60000, TimeUnit.MILLISECONDS);

  // The internal region holding the Lucene files must have been created.
  PartitionedRegion fileRegion = (PartitionedRegion) cache.getRegion(aeqId + ".files");
  assertNotNull(fileRegion);
  // With maximum=1 and three puts, at least one entry must have overflowed.
  Assert.assertTrue(0 < userRegion.getDiskRegionStats().getNumOverflowOnDisk());

  // All three values contain "world" in field "s", so the query must return
  // every entry even though most of them now live on disk.
  LuceneQuery<Integer, Type1> query = service.createLuceneQueryFactory()
      .create(INDEX_NAME, REGION_NAME, "s:world", DEFAULT_FIELD);
  PageableLuceneQueryResults<Integer, Type1> results = query.findPages();
  Assert.assertEquals(3, results.size());
}
Aggregations