Use of org.apache.geode.cache.lucene.internal.repository.RepositoryManager in the Apache Geode project.
Example from the class DumpDirectoryFilesJUnitTest, method createMocks.
/**
 * Builds the chain of mocks that DumpDirectoryFiles walks at execution time:
 * context -> region -> cache -> service -> index -> repository manager -> repository
 * -> writer -> directory -> file system.
 */
@Before
public void createMocks() throws BucketNotFoundException {
  GemFireCacheImpl mockCache = Fakes.cache();
  ResultSender mockSender = mock(ResultSender.class);
  Region mockRegion = mock(Region.class);

  // The function context supplies the arguments and the data set the function operates on.
  context = mock(RegionFunctionContext.class);
  when(context.getArguments()).thenReturn(new String[] { directoryName, indexName });
  when(context.getResultSender()).thenReturn(mockSender);
  when(context.getDataSet()).thenReturn(mockRegion);
  when(mockRegion.getCache()).thenReturn(mockCache);

  // The cache resolves the lucene service, which in turn resolves the index by name.
  InternalLuceneService mockService = mock(InternalLuceneService.class);
  when(mockCache.getService(any())).thenReturn(mockService);

  InternalLuceneIndex mockIndex = mock(InternalLuceneIndex.class);
  when(mockIndex.getName()).thenReturn(indexName);
  when(mockService.getIndex(eq(indexName), any())).thenReturn(mockIndex);

  // The index exposes a repository manager holding a single repository for this context.
  RepositoryManager mockRepoManager = mock(RepositoryManager.class);
  IndexRepository mockRepo = mock(IndexRepository.class);
  when(mockRepoManager.getRepositories(eq(context))).thenReturn(Collections.singleton(mockRepo));
  when(mockIndex.getRepositoryManager()).thenReturn(mockRepoManager);

  // The repository's writer points at a RegionDirectory backed by the mocked file system.
  fileSystem = mock(FileSystem.class);
  RegionDirectory mockDirectory = mock(RegionDirectory.class);
  when(mockDirectory.getFileSystem()).thenReturn(fileSystem);

  IndexWriter mockWriter = mock(IndexWriter.class);
  when(mockWriter.getDirectory()).thenReturn(mockDirectory);
  when(mockRepo.getWriter()).thenReturn(mockWriter);

  // The repository's bucket region reports the expected full path.
  Region mockBucket = mock(Region.class);
  when(mockBucket.getFullPath()).thenReturn(bucketName);
  when(mockRepo.getRegion()).thenReturn(mockBucket);
}
Use of org.apache.geode.cache.lucene.internal.repository.RepositoryManager in the Apache Geode project.
Example from the class LuceneEventListenerJUnitTest, method testProcessBatch.
/**
 * Verifies that LuceneEventListener routes a batch of async events to the correct
 * repository (chosen by region and callback argument), issuing updates for entries
 * that exist, deletes for entries that are gone, and exactly one commit per repository.
 */
@Test
public void testProcessBatch() throws Exception {
  // Consistency fix: use the statically imported mock/when/eq/any throughout instead of
  // mixing Mockito.mock/Mockito.when with the bare static forms used later in this method.
  RepositoryManager manager = mock(RepositoryManager.class);
  IndexRepository repo1 = mock(IndexRepository.class);
  IndexRepository repo2 = mock(IndexRepository.class);
  Region region1 = mock(Region.class);
  Region region2 = mock(Region.class);
  Object callback1 = new Object();
  when(manager.getRepository(eq(region1), any(), eq(callback1))).thenReturn(repo1);
  when(manager.getRepository(eq(region2), any(), eq(null))).thenReturn(repo2);
  LuceneEventListener listener = new LuceneEventListener(manager);
  List<AsyncEvent> events = new ArrayList<>();
  int numEntries = 100;
  for (int i = 0; i < numEntries; i++) {
    AsyncEvent event = mock(AsyncEvent.class);
    // Even indices target region1 with a callback argument; odd indices target region2 with none.
    Region region = i % 2 == 0 ? region1 : region2;
    Object callback = i % 2 == 0 ? callback1 : null;
    when(event.getRegion()).thenReturn(region);
    when(event.getKey()).thenReturn(i);
    when(event.getCallbackArgument()).thenReturn(callback);
    switch (i % 4) {
      case 0:
      case 1:
        // Entry present in the region -> the listener should update the index with its value.
        final EntrySnapshot entry = mock(EntrySnapshot.class);
        when(entry.getRawValue(true)).thenReturn(i);
        when(region.getEntry(eq(i))).thenReturn(entry);
        break;
      case 2:
      case 3:
        // No entry stubbed: getEntry returns null, which the listener treats as a destroy.
        break;
    }
    events.add(event);
  }
  listener.processEvents(events);
  // Each (region, present/destroyed) combination receives a quarter of the batch.
  verify(repo1, atLeast(numEntries / 4)).delete(any());
  verify(repo1, atLeast(numEntries / 4)).update(any(), any());
  verify(repo2, atLeast(numEntries / 4)).delete(any());
  verify(repo2, atLeast(numEntries / 4)).update(any(), any());
  // Each repository must be committed exactly once for the whole batch.
  verify(repo1, times(1)).commit();
  verify(repo2, times(1)).commit();
}
Use of org.apache.geode.cache.lucene.internal.repository.RepositoryManager in the Apache Geode project.
Example from the class LuceneQueryFunction, method execute.
/**
 * Executes a lucene query against every index repository (bucket) hosted locally for the
 * target region, merges the per-repository results, and sends a single merged
 * TopEntriesCollector back to the caller.
 *
 * @param context the function context; must be a RegionFunctionContext whose arguments are
 *        a LuceneFunctionContext carrying the query provider and result limit
 * @throws IllegalArgumentException if the search context or its query provider is missing
 * @throws LuceneIndexNotFoundException if no index with the requested name exists on the region
 * @throws InternalFunctionInvocationTargetException on retriable failures (bucket moved,
 *         cache closed, I/O error) so the function service can re-execute the function
 */
@Override
public void execute(FunctionContext context) {
  RegionFunctionContext ctx = (RegionFunctionContext) context;
  ResultSender<TopEntriesCollector> resultSender = ctx.getResultSender();
  Region region = ctx.getDataSet();
  LuceneFunctionContext<IndexResultCollector> searchContext =
      (LuceneFunctionContext) ctx.getArguments();
  if (searchContext == null) {
    throw new IllegalArgumentException("Missing search context");
  }
  LuceneQueryProvider queryProvider = searchContext.getQueryProvider();
  if (queryProvider == null) {
    throw new IllegalArgumentException("Missing query provider");
  }
  LuceneIndexImpl index = getLuceneIndex(region, searchContext);
  if (index == null) {
    throw new LuceneIndexNotFoundException(searchContext.getIndexName(), region.getFullPath());
  }
  RepositoryManager repoManager = index.getRepositoryManager();
  LuceneIndexStats stats = index.getIndexStats();
  Query query = getQuery(queryProvider, index);
  if (logger.isDebugEnabled()) {
    logger.debug("Executing lucene query: {}, on region {}", query, region.getFullPath());
  }
  int resultLimit = searchContext.getLimit();
  // searchContext is known non-null here (checked above), so the previous redundant
  // "(searchContext == null) ? null : ..." guard has been removed as dead code.
  CollectorManager manager = searchContext.getCollectorManager();
  if (manager == null) {
    manager = new TopEntriesCollectorManager(null, resultLimit);
  }
  Collection<IndexResultCollector> results = new ArrayList<>();
  TopEntriesCollector mergedResult = null;
  try {
    long start = stats.startQuery();
    Collection<IndexRepository> repositories = null;
    try {
      repositories = repoManager.getRepositories(ctx);
      for (IndexRepository repo : repositories) {
        IndexResultCollector collector = manager.newCollector(repo.toString());
        if (logger.isDebugEnabled()) {
          // Parameterized logging for consistency with the debug statement above.
          logger.debug("Executing search on repo: {}", repo);
        }
        repo.query(query, resultLimit, collector);
        results.add(collector);
      }
      mergedResult = (TopEntriesCollector) manager.reduce(results);
    } finally {
      // Record query time and result count even when the query fails part-way through.
      stats.endQuery(start, mergedResult == null ? 0 : mergedResult.size());
    }
    stats.incNumberOfQueryExecuted();
    resultSender.lastResult(mergedResult);
  } catch (IOException | BucketNotFoundException | CacheClosedException | PrimaryBucketException e) {
    // Retriable conditions (e.g. a bucket moved during rebalance): surface them as an
    // InternalFunctionInvocationTargetException so the caller re-executes the function.
    logger.debug("Exception during lucene query function", e);
    throw new InternalFunctionInvocationTargetException(e);
  }
}
Use of org.apache.geode.cache.lucene.internal.repository.RepositoryManager in the Apache Geode project.
Example from the class DumpDirectoryFiles, method execute.
/**
 * Dumps the lucene index files of every locally hosted bucket of the target region to disk.
 * Expects two string arguments: the export directory and the index name. For each repository
 * a subdirectory named "indexName_bucketPath" (with '/' replaced by '_') is created under the
 * export location and the bucket's file system is exported into it.
 *
 * @param context the function context; arguments must be a String[2] of
 *        {exportLocation, indexName}
 * @throws IllegalArgumentException if the arguments are missing or malformed
 * @throws IllegalStateException if the named index does not exist on the region, or an
 *         export directory cannot be created
 * @throws FunctionException if a bucket moved while reading the repositories
 */
@Override
public void execute(FunctionContext context) {
  RegionFunctionContext ctx = (RegionFunctionContext) context;
  if (!(context.getArguments() instanceof String[])) {
    throw new IllegalArgumentException("Arguments should be a string array");
  }
  String[] args = (String[]) context.getArguments();
  if (args.length != 2) {
    throw new IllegalArgumentException("Expected 2 arguments: exportLocation, indexName");
  }
  String exportLocation = args[0];
  String indexName = args[1];
  final Region<Object, Object> region = ctx.getDataSet();
  LuceneService service = LuceneServiceProvider.get(ctx.getDataSet().getCache());
  InternalLuceneIndex index = (InternalLuceneIndex) service.getIndex(indexName, region.getFullPath());
  if (index == null) {
    throw new IllegalStateException("Index not found for region " + region + " index " + indexName);
  }
  final RepositoryManager repoManager = index.getRepositoryManager();
  try {
    final Collection<IndexRepository> repositories = repoManager.getRepositories(ctx);
    repositories.stream().forEach(repo -> {
      final IndexWriter writer = repo.getWriter();
      RegionDirectory directory = (RegionDirectory) writer.getDirectory();
      FileSystem fs = directory.getFileSystem();
      // One export subdirectory per bucket, keyed by index name and bucket path.
      String bucketName = index.getName() + "_" + repo.getRegion().getFullPath();
      bucketName = bucketName.replace("/", "_");
      File bucketDirectory = new File(exportLocation, bucketName);
      // Fix: previously the mkdirs() result was ignored, so a failed directory creation
      // surfaced later as a confusing export failure. Fail fast with a clear message.
      // mkdirs() returns false when the directory already exists, so also accept that case.
      if (!bucketDirectory.mkdirs() && !bucketDirectory.isDirectory()) {
        throw new IllegalStateException(
            "Unable to create export directory " + bucketDirectory.getAbsolutePath());
      }
      fs.export(bucketDirectory);
    });
    context.getResultSender().lastResult(null);
  } catch (BucketNotFoundException e) {
    throw new FunctionException(e);
  }
}
Use of org.apache.geode.cache.lucene.internal.repository.RepositoryManager in the Apache Geode project.
Example from the class LuceneIndexRecoveryHAIntegrationTest, method recoverRepoInANewNode.
/**
 * On rebalance, new repository manager will be created. It will try to read fileAndChunkRegion
 * and construct index. This test simulates the same.
 */
// @Test
// NOTE(review): the @Test annotation above is commented out, so this test is currently
// disabled — confirm whether it was parked intentionally before re-enabling.
public void recoverRepoInANewNode() throws BucketNotFoundException, IOException, InterruptedException {
// Create a lucene index on a single-bucket partitioned region so the whole index
// lives in one bucket (bucket 0), which the repositories below address directly.
LuceneServiceImpl service = (LuceneServiceImpl) LuceneServiceProvider.get(cache);
service.createIndexFactory().setFields(indexedFields).create("index1", "/userRegion");
PartitionAttributes<String, String> attrs = new PartitionAttributesFactory().setTotalNumBuckets(1).create();
RegionFactory<String, String> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
regionfactory.setPartitionAttributes(attrs);
PartitionedRegion userRegion = (PartitionedRegion) regionfactory.create("userRegion");
LuceneIndexForPartitionedRegion index = (LuceneIndexForPartitionedRegion) service.getIndex("index1", "/userRegion");
// put an entry to create the bucket
userRegion.put("rebalance", "test");
// Wait until the async event queue has flushed the put into the index files.
service.waitUntilFlushed("index1", "userRegion", 30000, TimeUnit.MILLISECONDS);
// Build a fresh repository manager (as a rebalance would) and write through it.
RepositoryManager manager = new PartitionedRepositoryManager((LuceneIndexImpl) index, mapper);
IndexRepository repo = manager.getRepository(userRegion, 0, null);
assertNotNull(repo);
repo.create("rebalance", "test");
repo.commit();
// close the region to simulate bucket movement. New node will create repo using data persisted
// by old region
// ((PartitionedRegion)index.fileAndChunkRegion).close();
// ((PartitionedRegion)index.chunkRegion).close();
userRegion.close();
userRegion = (PartitionedRegion) regionfactory.create("userRegion");
userRegion.put("rebalance", "test");
// A second repository manager over the recreated region must hand out a new repository
// instance rather than the one bound to the closed region.
manager = new PartitionedRepositoryManager((LuceneIndexImpl) index, mapper);
IndexRepository newRepo = manager.getRepository(userRegion, 0, null);
Assert.assertNotEquals(newRepo, repo);
}
Aggregations