Use of org.apache.geode.cache.lucene.internal.repository.IndexRepository in project geode by apache.
The class DumpDirectoryFiles, method execute:
@Override
public void execute(FunctionContext context) {
  RegionFunctionContext ctx = (RegionFunctionContext) context;

  if (!(context.getArguments() instanceof String[])) {
    throw new IllegalArgumentException("Arguments should be a string array");
  }
  String[] args = (String[]) context.getArguments();
  if (args.length != 2) {
    throw new IllegalArgumentException("Expected 2 arguments: exportLocation, indexName");
  }
  String exportLocation = args[0];
  String indexName = args[1];

  final Region<Object, Object> region = ctx.getDataSet();
  LuceneService service = LuceneServiceProvider.get(ctx.getDataSet().getCache());
  InternalLuceneIndex index =
      (InternalLuceneIndex) service.getIndex(indexName, region.getFullPath());
  if (index == null) {
    throw new IllegalStateException("Index not found for region " + region + " index " + indexName);
  }

  final RepositoryManager repoManager = index.getRepositoryManager();
  try {
    final Collection<IndexRepository> repositories = repoManager.getRepositories(ctx);
    repositories.stream().forEach(repo -> {
      final IndexWriter writer = repo.getWriter();
      RegionDirectory directory = (RegionDirectory) writer.getDirectory();
      FileSystem fs = directory.getFileSystem();

      // Export this bucket's Lucene files from the region-backed directory to local disk.
      String bucketName = index.getName() + "_" + repo.getRegion().getFullPath();
      bucketName = bucketName.replace("/", "_");
      File bucketDirectory = new File(exportLocation, bucketName);
      bucketDirectory.mkdirs();
      fs.export(bucketDirectory);
    });
    context.getResultSender().lastResult(null);
  } catch (BucketNotFoundException e) {
    throw new FunctionException(e);
  }
}
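
For context, a minimal invocation sketch (not from the Geode source) of how such an export function might be called from application code. It assumes a cache with the Lucene index already created; the region name, export path, and index name are illustrative.

// Hedged sketch: execute the export function on every member hosting the data region.
// "userRegion", "/tmp/lucene-export", and "index1" are assumptions for illustration.
Region<Object, Object> dataRegion = cache.getRegion("userRegion");
ResultCollector<?, ?> rc = FunctionService.onRegion(dataRegion)
    .setArguments(new String[] {"/tmp/lucene-export", "index1"})
    .execute(new DumpDirectoryFiles());
rc.getResult(); // block until every member has exported its buckets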
Use of org.apache.geode.cache.lucene.internal.repository.IndexRepository in project geode by apache.
The class LuceneIndexRecoveryHAIntegrationTest, method recoverRepoInANewNode:
/**
 * On rebalance, a new repository manager will be created. It will try to read the
 * fileAndChunkRegion and construct the index. This test simulates that scenario.
 */
// @Test
public void recoverRepoInANewNode()
    throws BucketNotFoundException, IOException, InterruptedException {
  LuceneServiceImpl service = (LuceneServiceImpl) LuceneServiceProvider.get(cache);
  service.createIndexFactory().setFields(indexedFields).create("index1", "/userRegion");
  PartitionAttributes<String, String> attrs =
      new PartitionAttributesFactory().setTotalNumBuckets(1).create();
  RegionFactory<String, String> regionfactory =
      cache.createRegionFactory(RegionShortcut.PARTITION);
  regionfactory.setPartitionAttributes(attrs);
  PartitionedRegion userRegion = (PartitionedRegion) regionfactory.create("userRegion");
  LuceneIndexForPartitionedRegion index =
      (LuceneIndexForPartitionedRegion) service.getIndex("index1", "/userRegion");

  // put an entry to create the bucket
  userRegion.put("rebalance", "test");
  service.waitUntilFlushed("index1", "userRegion", 30000, TimeUnit.MILLISECONDS);

  RepositoryManager manager = new PartitionedRepositoryManager((LuceneIndexImpl) index, mapper);
  IndexRepository repo = manager.getRepository(userRegion, 0, null);
  assertNotNull(repo);
  repo.create("rebalance", "test");
  repo.commit();

  // close the region to simulate bucket movement. New node will create repo using data persisted
  // by old region
  // ((PartitionedRegion) index.fileAndChunkRegion).close();
  // ((PartitionedRegion) index.chunkRegion).close();
  userRegion.close();

  userRegion = (PartitionedRegion) regionfactory.create("userRegion");
  userRegion.put("rebalance", "test");
  manager = new PartitionedRepositoryManager((LuceneIndexImpl) index, mapper);
  IndexRepository newRepo = manager.getRepository(userRegion, 0, null);
  Assert.assertNotEquals(newRepo, repo);
}
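
A short hedged continuation of the test above (not part of the original source): after recovery, the new repository should accept further writes against the data persisted by the old member, using the same IndexRepository calls exercised earlier in the test.

// Continuation sketch (assumption: placed directly after the assertNotEquals above).
newRepo.create("rebalance", "test"); // index the re-put entry through the recovered repository
newRepo.commit(); // flush the new documents into the persisted file-and-chunk data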
Use of org.apache.geode.cache.lucene.internal.repository.IndexRepository in project geode by apache.
The class PartitionedRepositoryManagerJUnitTest, method getByRegion:
@Test
public void getByRegion() throws BucketNotFoundException {
  setUpMockBucket(0);
  setUpMockBucket(1);

  Set<Integer> buckets = new LinkedHashSet<Integer>(Arrays.asList(0, 1));
  InternalRegionFunctionContext ctx = Mockito.mock(InternalRegionFunctionContext.class);
  when(ctx.getLocalBucketSet(any())).thenReturn(buckets);

  Collection<IndexRepository> repos = repoManager.getRepositories(ctx);
  assertEquals(2, repos.size());

  Iterator<IndexRepository> itr = repos.iterator();
  IndexRepositoryImpl repo0 = (IndexRepositoryImpl) itr.next();
  IndexRepositoryImpl repo1 = (IndexRepositoryImpl) itr.next();

  assertNotNull(repo0);
  assertNotNull(repo1);
  assertNotEquals(repo0, repo1);
  checkRepository(repo0, 0);
  checkRepository(repo1, 1);
}
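
For completeness, a hedged sketch (not from the test) of walking the per-bucket repositories returned above, using only methods already shown in the earlier snippets; it assumes it runs in the same test after getRepositories.

// Hedged sketch: each repository is scoped to one local bucket of the data region;
// getRegion() and getWriter() (both used in the DumpDirectoryFiles example) expose its backing storage.
for (IndexRepository repo : repos) {
  assertNotNull(repo.getRegion());
  assertNotNull(repo.getWriter());
}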