Example usage of org.apache.geode.test.dunit.SerializableRunnableIF in the Apache Geode project:
class LuceneIndexDestroyDUnitTest, method verifyDestroySingleIndexWhileDoingQueries.
@Test
@Parameters(method = "getListOfRegionTestTypes")
public void verifyDestroySingleIndexWhileDoingQueries(RegionTestableType regionType) throws Exception {
  // Set up the region with a single Lucene index on both data stores and the accessor.
  SerializableRunnableIF indexCreator = createIndex();
  dataStore1.invoke(() -> initDataStore(indexCreator, regionType));
  dataStore2.invoke(() -> initDataStore(indexCreator, regionType));
  accessor.invoke(() -> initAccessor(indexCreator, regionType));

  // Confirm every member sees the index before mutating anything.
  dataStore1.invoke(() -> verifyIndexCreated());
  dataStore2.invoke(() -> verifyIndexCreated());
  accessor.invoke(() -> verifyIndexCreated());

  // Load entries, then wait for the index queue to drain so queries can see them.
  int entryCount = 100;
  accessor.invoke(() -> doPuts(entryCount));
  accessor.invoke(() -> waitUntilFlushed(INDEX_NAME));

  // Launch background queries; they run until the index destroy interrupts them.
  AsyncInvocation queryInvocation =
      accessor.invokeAsync(() -> doQueriesUntilException(INDEX_NAME, "field1Value", "field1", entryCount));
  // Block until at least one query has executed, so the destroy races real traffic.
  accessor.invoke(() -> waitUntilQueriesHaveStarted());

  // Destroying from a single member is sufficient; the destroy propagates cluster-wide.
  accessor.invoke(() -> destroyIndex());
  dataStore1.invoke(() -> verifyIndexDestroyed());
  dataStore2.invoke(() -> verifyIndexDestroyed());
  accessor.invoke(() -> verifyIndexDestroyed());

  // The query thread must wind down without reporting an unexpected exception.
  ThreadUtils.join(queryInvocation, 60 * 1000);
  if (queryInvocation.exceptionOccurred()) {
    fail(queryInvocation.getException());
  }
}
Example usage of org.apache.geode.test.dunit.SerializableRunnableIF in the Apache Geode project:
class LuceneIndexDestroyDUnitTest, method verifyDestroyRecreateDifferentIndex.
@Test
@Parameters(method = "getListOfRegionTestTypes")
public void verifyDestroyRecreateDifferentIndex(RegionTestableType regionType) {
  // Stand up the region with the original index on both data stores and the accessor.
  SerializableRunnableIF indexCreator = createIndex();
  dataStore1.invoke(() -> initDataStore(indexCreator, regionType));
  dataStore2.invoke(() -> initDataStore(indexCreator, regionType));
  accessor.invoke(() -> initAccessor(indexCreator, regionType));

  // Confirm the index exists on every member.
  dataStore1.invoke(() -> verifyIndexCreated());
  dataStore2.invoke(() -> verifyIndexCreated());
  accessor.invoke(() -> verifyIndexCreated());

  // Put enough entries to force IndexRepositories into existence, then flush.
  int entryCount = 1000;
  accessor.invoke(() -> doPuts(entryCount));
  accessor.invoke(() -> waitUntilFlushed(INDEX_NAME));

  // Sanity-check that queries against the original field return every entry.
  accessor.invoke(() -> executeQuery(INDEX_NAME, "field1Value", "field1", entryCount));

  // Snapshot the region so it can be reloaded after the rebuild.
  accessor.invoke(() -> exportData(regionType));

  // A destroy issued on one member tears the indexes down everywhere.
  dataStore1.invoke(() -> destroyIndexes());
  dataStore1.invoke(() -> verifyIndexesDestroyed());
  dataStore2.invoke(() -> verifyIndexesDestroyed());

  // Drop the data region itself before rebuilding with a different indexed field.
  dataStore1.invoke(() -> destroyDataRegion(true));

  // Recreate the region with an index on field2 instead of field1.
  SerializableRunnableIF newIndexCreator = createIndex(INDEX_NAME, REGION_NAME, "field2");
  dataStore1.invoke(() -> initDataStore(newIndexCreator, regionType));
  dataStore2.invoke(() -> initDataStore(newIndexCreator, regionType));
  accessor.invoke(() -> initAccessor(newIndexCreator, regionType));

  // Reload the snapshot; the import re-indexes every entry under the new index.
  accessor.invoke(() -> importData(regionType, entryCount));

  // Flushing on both data stores also proves the rebuild did not deadlock.
  dataStore1.invoke(() -> waitUntilFlushed(INDEX_NAME));
  dataStore2.invoke(() -> waitUntilFlushed(INDEX_NAME));

  // Queries against the new field must now return every entry.
  accessor.invoke(() -> executeQuery(INDEX_NAME, "field2Value", "field2", entryCount));
}
Example usage of org.apache.geode.test.dunit.SerializableRunnableIF in the Apache Geode project:
class MixedObjectIndexDUnitTest, method luceneMustIndexFieldsWithTheSameNameDifferentDataTypeInARegionWithMixedObjects.
@Test
@Parameters(method = "getPartitionRegionTypes")
public void luceneMustIndexFieldsWithTheSameNameDifferentDataTypeInARegionWithMixedObjects(RegionTestableType regionTestableType) {
  // Verifies that a single index on field "text" handles objects whose "text" field has
  // different data types (String, Float, Integer), and that each query type only matches
  // the objects whose field type is compatible with it.
  SerializableRunnableIF createIndexOnTextField = getSerializableRunnableIFCreateIndexOnFieldText();
  dataStore1.invoke(() -> initDataStore(createIndexOnTextField, regionTestableType));
  dataStore2.invoke(() -> initDataStore(createIndexOnTextField, regionTestableType));
  accessor.invoke(() -> initDataStore(createIndexOnTextField, regionTestableType));
  accessor.invoke(() -> {
    Region region = getCache().getRegion(REGION_NAME);
    // One bucket-range per value type. The deprecated boxing constructors
    // (new Integer(...)/new Float(...)) are replaced with valueOf, which is
    // behaviorally equivalent for indexing/equality purposes.
    IntStream.range(2 * NUM_BUCKETS, 3 * NUM_BUCKETS).forEach(i -> region.put(i, new TestObjectSameFieldNameButDifferentDataTypeInteger(Integer.valueOf(1000))));
    IntStream.range(0, NUM_BUCKETS).forEach(i -> region.put(i, new TestObject("hello world")));
    IntStream.range(NUM_BUCKETS, 2 * NUM_BUCKETS).forEach(i -> region.put(i, new TestObjectSameFieldNameButDifferentDataTypeFloat(Float.valueOf(999.1f))));
  });
  // Index writes are async; the test is invalid unless the queue drained in time.
  assertTrue(waitForFlushBeforeExecuteTextSearch(accessor, 60000));
  accessor.invoke(() -> {
    LuceneService luceneService = LuceneServiceProvider.get(getCache());
    // Text query: only the String-typed objects should match.
    LuceneQuery luceneQueryForTextField = luceneService.createLuceneQueryFactory().setLimit(100).create(INDEX_NAME, REGION_NAME, "world", "text");
    List luceneResults = luceneQueryForTextField.findResults();
    validateObjectResultCounts(luceneResults, TestObject.class, NUM_BUCKETS, TestObjectSameFieldNameButDifferentDataTypeFloat.class, 0, TestObjectSameFieldNameButDifferentDataTypeInteger.class, 0);
    // Float range query: only the Float-typed objects should match.
    FloatRangeQueryProvider floatRangeQueryProvider = new FloatRangeQueryProvider("text", 999.0f, 999.2f);
    luceneQueryForTextField = luceneService.createLuceneQueryFactory().setLimit(100).create(INDEX_NAME, REGION_NAME, floatRangeQueryProvider);
    luceneResults = luceneQueryForTextField.findResults();
    validateObjectResultCounts(luceneResults, TestObject.class, 0, TestObjectSameFieldNameButDifferentDataTypeFloat.class, NUM_BUCKETS, TestObjectSameFieldNameButDifferentDataTypeInteger.class, 0);
    // Int range query: only the Integer-typed objects should match.
    IntRangeQueryProvider intRangeQueryProvider = new IntRangeQueryProvider("text", 1000, 1000);
    luceneQueryForTextField = luceneService.createLuceneQueryFactory().setLimit(100).create(INDEX_NAME, REGION_NAME, intRangeQueryProvider);
    luceneResults = luceneQueryForTextField.findResults();
    validateObjectResultCounts(luceneResults, TestObject.class, 0, TestObjectSameFieldNameButDifferentDataTypeFloat.class, 0, TestObjectSameFieldNameButDifferentDataTypeInteger.class, NUM_BUCKETS);
  });
}
Example usage of org.apache.geode.test.dunit.SerializableRunnableIF in the Apache Geode project:
class LuceneIndexDestroyDUnitTest, method verifyDestroyAllIndexesWhileDoingQueries.
@Test
@Parameters(method = "getListOfRegionTestTypes")
public void verifyDestroyAllIndexesWhileDoingQueries(RegionTestableType regionType) throws Exception {
  // Stand up the region with two Lucene indexes on both data stores and the accessor.
  SerializableRunnableIF indexesCreator = createIndexes();
  dataStore1.invoke(() -> initDataStore(indexesCreator, regionType));
  dataStore2.invoke(() -> initDataStore(indexesCreator, regionType));
  accessor.invoke(() -> initAccessor(indexesCreator, regionType));

  // Confirm both indexes exist on every member.
  dataStore1.invoke(() -> verifyIndexesCreated());
  dataStore2.invoke(() -> verifyIndexesCreated());
  accessor.invoke(() -> verifyIndexesCreated());

  // Load entries and drain both index queues so queries see the data.
  int entryCount = 100;
  accessor.invoke(() -> doPuts(entryCount));
  accessor.invoke(() -> waitUntilFlushed(INDEX1_NAME));
  accessor.invoke(() -> waitUntilFlushed(INDEX2_NAME));

  // Launch background queries against the first index and wait until they are live,
  // so the destroy below races in-flight query traffic.
  AsyncInvocation queryInvocation =
      accessor.invokeAsync(() -> doQueriesUntilException(INDEX1_NAME, "field1Value", "field1", entryCount));
  accessor.invoke(() -> waitUntilQueriesHaveStarted());

  // Destroying from one member is enough; the destroy propagates cluster-wide.
  accessor.invoke(() -> destroyIndexes());
  dataStore1.invoke(() -> verifyIndexesDestroyed());
  dataStore2.invoke(() -> verifyIndexesDestroyed());
  accessor.invoke(() -> verifyIndexesDestroyed());

  // The query thread must finish without an unexpected exception.
  ThreadUtils.join(queryInvocation, 60 * 1000);
  if (queryInvocation.exceptionOccurred()) {
    fail(queryInvocation.getException());
  }
}
Example usage of org.apache.geode.test.dunit.SerializableRunnableIF in the Apache Geode project:
class MixedObjectIndexDUnitTest, method luceneMustIndexFieldsWithTheSameNameInARegionWithMixedObjects.
@Test
@Parameters(method = "getPartitionRegionTypes")
public void luceneMustIndexFieldsWithTheSameNameInARegionWithMixedObjects(RegionTestableType regionTestableType) {
  // Verifies that an index on field "text" covers every object type in the region that
  // declares that field, while objects without the field contribute no results.
  SerializableRunnableIF createIndexOnTextField = getSerializableRunnableIFCreateIndexOnFieldText();
  dataStore1.invoke(() -> initDataStore(createIndexOnTextField, regionTestableType));
  dataStore2.invoke(() -> initDataStore(createIndexOnTextField, regionTestableType));
  accessor.invoke(() -> initDataStore(createIndexOnTextField, regionTestableType));
  accessor.invoke(() -> {
    Region region = getCache().getRegion(REGION_NAME);
    // One bucket-range per object type: two types share the "text" field, one lacks it.
    IntStream.range(0, NUM_BUCKETS).forEach(i -> region.put(i, new TestObject("hello world")));
    IntStream.range(NUM_BUCKETS, 2 * NUM_BUCKETS).forEach(i -> region.put(i, new TestObjectWithSameFieldName("hello world")));
    IntStream.range(2 * NUM_BUCKETS, 3 * NUM_BUCKETS).forEach(i -> region.put(i, new TestObjectWithNoCommonField("hello world")));
  });
  // Index writes are async. Assert the flush succeeded (the sibling test above does the
  // same); previously a timeout here was silently ignored, letting the query assertions
  // fail with a confusing count mismatch instead of a clear flush failure.
  assertTrue(waitForFlushBeforeExecuteTextSearch(accessor, 60000));
  accessor.invoke(() -> {
    LuceneService luceneService = LuceneServiceProvider.get(getCache());
    // Both types that declare "text" must match; the type without the field must not.
    LuceneQuery luceneQueryForTextField = luceneService.createLuceneQueryFactory().setLimit(100).create(INDEX_NAME, REGION_NAME, "world", "text");
    List luceneResults = luceneQueryForTextField.findResults();
    validateObjectResultCounts(luceneResults, TestObject.class, NUM_BUCKETS, TestObjectWithSameFieldName.class, NUM_BUCKETS, TestObjectWithNoCommonField.class, 0);
  });
}
Aggregations