Use of org.apache.geode.test.dunit.SerializableRunnableIF in project geode by apache.
From the class LuceneIndexCreationDUnitTest, method verifyDifferentIndexesFails2.
@Test
@Parameters({ "PARTITION" })
public void verifyDifferentIndexesFails2(RegionTestableType regionType) {
  // First member defines a single index with one field
  SerializableRunnableIF createIndex1 = getFieldsIndexWithOneField();
  dataStore1.invoke(() -> initDataStore(createIndex1, regionType));

  // Second member defines two indexes on the same region, which conflicts
  // with the first member's single-index definition and is expected to fail
  SerializableRunnableIF createIndex2 = () -> {
    LuceneService luceneService = LuceneServiceProvider.get(getCache());
    luceneService.createIndexFactory().addField("field1").create(INDEX_NAME, REGION_NAME);
    luceneService.createIndexFactory().addField("field2").create(INDEX_NAME + "2", REGION_NAME);
  };
  dataStore2.invoke(() -> initDataStore(createIndex2, regionType,
      CANNOT_CREATE_LUCENE_INDEX_DIFFERENT_INDEXES_2));
}
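For context, SerializableRunnableIF is what lets the lambdas above be captured by dataStore1.invoke(...) and executed in a remote dunit VM: it is a functional interface that also extends Serializable. A minimal sketch of such an interface follows; the actual Geode definition may differ in annotations or the throws clause.

  import java.io.Serializable;

  // Sketch: a runnable that can be serialized and shipped to another VM.
  @FunctionalInterface
  public interface SerializableRunnableIF extends Serializable {
    void run() throws Exception;
  }

Because every lambda assigned to this type is serializable, the test can define its index-creation logic once and invoke it on any member.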
Use of org.apache.geode.test.dunit.SerializableRunnableIF in project geode by apache.
From the class LuceneIndexCreationDUnitTest, method verifyDifferentFieldAnalyzerSizesFails1.
@Test
@Parameters({ "PARTITION" })
public void verifyDifferentFieldAnalyzerSizesFails1(RegionTestableType regionType) {
  // First member registers analyzers for two fields
  SerializableRunnableIF createIndex1 = getAnalyzersIndexWithTwoFields();
  dataStore1.invoke(() -> initDataStore(createIndex1, regionType));

  // Second member registers an analyzer for only one field, so its index
  // definition conflicts and creation is expected to fail
  SerializableRunnableIF createIndex2 = getAnalyzersIndexWithOneField();
  dataStore2.invoke(() -> initDataStore(createIndex2, regionType,
      CANNOT_CREATE_LUCENE_INDEX_DIFFERENT_FIELDS_2));
}
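The helpers getAnalyzersIndexWithTwoFields() and getAnalyzersIndexWithOneField() are not shown in this snippet. A plausible sketch of the two-field variant, using the addField(String, Analyzer) overload of LuceneIndexFactory, is shown below; the KeywordAnalyzer choice is purely illustrative, not necessarily what the test uses.

  // Sketch of a helper returning a serializable index-creation task
  // with an explicit analyzer per field.
  private SerializableRunnableIF getAnalyzersIndexWithTwoFields() {
    return () -> {
      LuceneService luceneService = LuceneServiceProvider.get(getCache());
      luceneService.createIndexFactory()
          .addField("field1", new KeywordAnalyzer())  // illustrative analyzer choice
          .addField("field2", new KeywordAnalyzer())
          .create(INDEX_NAME, REGION_NAME);
    };
  }

The one-field variant would register an analyzer for only "field1", so the two members disagree on the analyzed fields and the second member's index definition is rejected.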
Use of org.apache.geode.test.dunit.SerializableRunnableIF in project geode by apache.
From the class LuceneIndexDestroyDUnitTest, method verifyDestroyRecreateIndexDifferentName.
@Test
@Parameters(method = "getListOfRegionTestTypes")
public void verifyDestroyRecreateIndexDifferentName(RegionTestableType regionType) {
  // Create index and region
  SerializableRunnableIF createIndex = createIndex();
  dataStore1.invoke(() -> initDataStore(createIndex, regionType));
  dataStore2.invoke(() -> initDataStore(createIndex, regionType));
  accessor.invoke(() -> initAccessor(createIndex, regionType));

  // Verify index created
  dataStore1.invoke(() -> verifyIndexCreated());
  dataStore2.invoke(() -> verifyIndexCreated());
  accessor.invoke(() -> verifyIndexCreated());

  // Do puts to cause IndexRepositories to be created
  int numPuts = 100;
  accessor.invoke(() -> doPuts(numPuts));

  // Wait until queue is flushed
  accessor.invoke(() -> waitUntilFlushed(INDEX_NAME));

  // Execute query and verify results
  accessor.invoke(() -> executeQuery(INDEX_NAME, "field1Value", "field1", numPuts));

  // Export entries from region
  accessor.invoke(() -> exportData(regionType));

  // Destroy indexes (only needs to be done on one member)
  dataStore1.invoke(() -> destroyIndexes());

  // Verify indexes destroyed
  dataStore1.invoke(() -> verifyIndexesDestroyed());
  dataStore2.invoke(() -> verifyIndexesDestroyed());

  // Destroy data region
  dataStore1.invoke(() -> destroyDataRegion(true));

  // Recreate index and region
  String newIndexName = INDEX_NAME + "+_1";
  SerializableRunnableIF createIndexNewName = createIndex(newIndexName, REGION_NAME, "field1");
  dataStore1.invoke(() -> initDataStore(createIndexNewName, regionType));
  dataStore2.invoke(() -> initDataStore(createIndexNewName, regionType));
  accessor.invoke(() -> initAccessor(createIndexNewName, regionType));

  // Import entries into region
  accessor.invoke(() -> importData(regionType, numPuts));

  // Wait until queue is flushed
  // This verifies there are no deadlocks
  dataStore1.invoke(() -> waitUntilFlushed(newIndexName));
  dataStore2.invoke(() -> waitUntilFlushed(newIndexName));

  // Re-execute query and verify results
  accessor.invoke(() -> executeQuery(newIndexName, "field1Value", "field1", numPuts));
}
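The destroyIndexes() and createIndex(...) helpers used here wrap the public LuceneService API. A hedged sketch of the underlying calls, runnable inside a test method that declares throws Exception (the new index name below is illustrative):

  LuceneService luceneService = LuceneServiceProvider.get(getCache());

  // Destroy every Lucene index defined on the region; this must happen
  // before the data region itself can be destroyed.
  luceneService.destroyIndexes(REGION_NAME);

  // Recreate an index under a new name on the recreated region.
  luceneService.createIndexFactory()
      .addField("field1")
      .create("newIndexName", REGION_NAME);  // illustrative name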
Use of org.apache.geode.test.dunit.SerializableRunnableIF in project geode by apache.
From the class LuceneIndexDestroyDUnitTest, method verifyDestroyRecreateIndexSameName.
@Test
@Parameters(method = "getListOfRegionTestTypes")
public void verifyDestroyRecreateIndexSameName(RegionTestableType regionType) {
  // Create index and region
  SerializableRunnableIF createIndex = createIndex();
  dataStore1.invoke(() -> initDataStore(createIndex, regionType));
  dataStore2.invoke(() -> initDataStore(createIndex, regionType));
  accessor.invoke(() -> initAccessor(createIndex, regionType));

  // Verify index created
  dataStore1.invoke(() -> verifyIndexCreated());
  dataStore2.invoke(() -> verifyIndexCreated());
  accessor.invoke(() -> verifyIndexCreated());

  // Do puts to cause IndexRepositories to be created
  int numPuts = 100;
  accessor.invoke(() -> doPuts(numPuts));

  // Wait until queue is flushed
  accessor.invoke(() -> waitUntilFlushed(INDEX_NAME));

  // Execute query and verify results
  accessor.invoke(() -> executeQuery(INDEX_NAME, "field1Value", "field1", numPuts));

  // Export entries from region
  accessor.invoke(() -> exportData(regionType));

  // Destroy indexes (only needs to be done on one member)
  dataStore1.invoke(() -> destroyIndexes());

  // Verify indexes destroyed
  dataStore1.invoke(() -> verifyIndexesDestroyed());
  dataStore2.invoke(() -> verifyIndexesDestroyed());

  // Destroy data region
  dataStore1.invoke(() -> destroyDataRegion(true));

  // Recreate index and region
  dataStore1.invoke(() -> initDataStore(createIndex, regionType));
  dataStore2.invoke(() -> initDataStore(createIndex, regionType));
  accessor.invoke(() -> initAccessor(createIndex, regionType));

  // Import entries into region
  accessor.invoke(() -> importData(regionType, numPuts));

  // Wait until queue is flushed
  // This verifies there are no deadlocks
  dataStore1.invoke(() -> waitUntilFlushed(INDEX_NAME));
  dataStore2.invoke(() -> waitUntilFlushed(INDEX_NAME));

  // Re-execute query and verify results
  accessor.invoke(() -> executeQuery(INDEX_NAME, "field1Value", "field1", numPuts));
}
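The exportData(...) and importData(...) helpers are likewise not shown. One documented way to round-trip region entries across a destroy/recreate is the region snapshot service; the sketch below assumes that is roughly what the helpers do (the snapshot file path is an illustrative assumption, and SnapshotFormat comes from org.apache.geode.cache.snapshot.SnapshotOptions).

  // Before destroying the region: save all entries to a snapshot file.
  Region<Object, Object> region = getCache().getRegion(REGION_NAME);
  File snapshot = new File("region-snapshot.gfd");  // illustrative path
  region.getSnapshotService().save(snapshot, SnapshotFormat.GEMFIRE);

  // After the region and index have been recreated: load the entries back.
  // Each loaded entry passes through the index's async event queue and is re-indexed.
  getCache().getRegion(REGION_NAME).getSnapshotService().load(snapshot, SnapshotFormat.GEMFIRE);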
Use of org.apache.geode.test.dunit.SerializableRunnableIF in project geode by apache.
From the class RebalanceWithRedundancyDUnitTest, method returnCorrectResultsWhenIndexUpdateHappensIntheMiddleofGII.
@Test
@Parameters(method = "getListOfRegionTestTypes")
public void returnCorrectResultsWhenIndexUpdateHappensIntheMiddleofGII(RegionTestableType regionTestType)
    throws InterruptedException {
  // Create the index and region on the first data store and the accessor
  SerializableRunnableIF createIndex = () -> {
    LuceneService luceneService = LuceneServiceProvider.get(getCache());
    luceneService.createIndexFactory().setFields("text").create(INDEX_NAME, REGION_NAME);
  };
  dataStore1.invoke(() -> initDataStore(createIndex, regionTestType));
  accessor.invoke(() -> initAccessor(createIndex, regionTestType));

  // Pause the index's queue sender so updates are not applied yet,
  // then put one entry into each bucket
  dataStore1.invoke(() -> LuceneTestUtilities.pauseSender(getCache()));
  putEntryInEachBucket();

  // Install a GII test hook on the second data store: while the image request
  // for the index's files region is in flight, resume the sender on dataStore1
  // and wait for the queue to flush
  dataStore2.invoke(() -> {
    InitialImageOperation.setGIITestHook(
        new GIITestHook(GIITestHookType.AfterSentRequestImage, "Do puts during request") {
          @Override
          public void reset() {}

          @Override
          public String getRegionName() {
            return "_B__index#__region.files_0";
          }

          @Override
          public void run() {
            dataStore1.invoke(() -> LuceneTestUtilities.resumeSender(getCache()));
            waitForFlushBeforeExecuteTextSearch(dataStore1, 30000);
          }
        });
  });

  // Start the second data store; GII of the files region triggers the hook above
  dataStore2.invoke(() -> initDataStore(createIndex, regionTestType));
  assertTrue(waitForFlushBeforeExecuteTextSearch(dataStore1, 30000));

  // Wait until redundancy is fully restored on the second data store
  dataStore2.invoke(() -> {
    PartitionedRegion region = (PartitionedRegion) getCache().getRegion(REGION_NAME);
    Awaitility.await().atMost(1, TimeUnit.MINUTES)
        .until(() -> assertEquals(0, region.getPrStats().getLowRedundancyBucketCount()));
  });

  // Close the first member and verify the query still returns every entry
  dataStore1.invoke(() -> getCache().close());
  assertTrue(waitForFlushBeforeExecuteTextSearch(dataStore2, 30000));
  executeTextSearch(accessor, "world", "text", NUM_BUCKETS);
}
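The final call, executeTextSearch(accessor, "world", "text", NUM_BUCKETS), presumably runs a Lucene query in the accessor VM and checks the hit count, since putEntryInEachBucket() placed one matching entry per bucket. A hedged sketch of that check with the public query API (the key/value types are assumptions):

  accessor.invoke(() -> {
    LuceneService luceneService = LuceneServiceProvider.get(getCache());
    LuceneQuery<Object, Object> query = luceneService.createLuceneQueryFactory()
        .create(INDEX_NAME, REGION_NAME, "world", "text");
    // Expect one hit per bucket, even after dataStore1 was closed,
    // because redundant copies were created before the member went away.
    assertEquals(NUM_BUCKETS, query.findKeys().size());
  });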