Use of org.apache.geode.internal.cache.BucketRegion in the Apache Geode project.
Example from the class AbstractPartitionedRepositoryManager, method getRepositories.
/**
 * Collects the Lucene index repositories for every bucket of the context's data set
 * that is hosted locally by this member.
 *
 * @param ctx the function context whose local bucket set determines which repositories are returned
 * @return one {@link IndexRepository} per locally hosted bucket
 * @throws BucketNotFoundException if a bucket id from the local bucket set has no local
 *         {@link BucketRegion} (e.g. the bucket moved or was destroyed concurrently)
 */
@Override
public Collection<IndexRepository> getRepositories(RegionFunctionContext ctx) throws BucketNotFoundException {
  Region<Object, Object> region = ctx.getDataSet();
  Set<Integer> buckets = ((InternalRegionFunctionContext) ctx).getLocalBucketSet(region);
  ArrayList<IndexRepository> repos = new ArrayList<IndexRepository>(buckets.size());
  for (Integer bucketId : buckets) {
    BucketRegion userBucket = userRegion.getDataStore().getLocalBucketById(bucketId);
    if (userBucket == null) {
      // Fix: the original message concatenated the region directly onto "bucket id"
      // with no separator, producing e.g. "...region /fooTestRegionbucket id 3".
      throw new BucketNotFoundException("User bucket was not found for region " + region + " bucket id " + bucketId);
    } else {
      repos.add(getRepository(userBucket.getId()));
    }
  }
  return repos;
}
Use of org.apache.geode.internal.cache.BucketRegion in the Apache Geode project.
Example from the class IndexRepositoryFactory, method computeIndexRepository.
/**
 * Computes (or reuses) the Lucene {@link IndexRepository} for the given bucket.
 *
 * <p>Returns {@code null} when this member should not host a repository for the bucket:
 * either the file-and-chunk bucket is not present locally, or this member is not the
 * bucket's primary. In both cases any old repository is cleaned up first.
 *
 * <p>Locking: a distributed lock named per file-and-chunk bucket is acquired before the
 * IndexWriter is created. On success the lock is deliberately NOT released here — it is
 * handed off to the returned IndexRepositoryImpl (which receives lockService/lockName)
 * and released when the repository is closed. Only on failure does the finally block
 * release the lock.
 *
 * @param bucketId      id of the data bucket the repository indexes
 * @param serializer    converts region values into Lucene documents
 * @param index         the Lucene index definition (must be a LuceneIndexForPartitionedRegion)
 * @param userRegion    the user's partitioned data region
 * @param oldRepository previously computed repository for this bucket, or null
 * @return a usable repository for the bucket, or null if this member should not host one
 * @throws IOException if the Lucene IndexWriter cannot be constructed
 */
public IndexRepository computeIndexRepository(final Integer bucketId, LuceneSerializer serializer, LuceneIndexImpl index, PartitionedRegion userRegion, final IndexRepository oldRepository) throws IOException {
LuceneIndexForPartitionedRegion indexForPR = (LuceneIndexForPartitionedRegion) index;
final PartitionedRegion fileRegion = indexForPR.getFileAndChunkRegion();
BucketRegion fileAndChunkBucket = getMatchingBucket(fileRegion, bucketId);
BucketRegion dataBucket = getMatchingBucket(userRegion, bucketId);
// success stays false until the repository is fully constructed; the finally block
// uses it to decide whether the distributed lock must be released.
boolean success = false;
// No local file-and-chunk bucket: this member cannot host the repository.
if (fileAndChunkBucket == null) {
if (oldRepository != null) {
oldRepository.cleanup();
}
return null;
}
// Only the primary for the file-and-chunk bucket maintains the repository.
if (!fileAndChunkBucket.getBucketAdvisor().isPrimary()) {
if (oldRepository != null) {
oldRepository.cleanup();
}
return null;
}
// Reuse an existing open repository as-is.
if (oldRepository != null && !oldRepository.isClosed()) {
return oldRepository;
}
// Old repository exists but is closed: clean it up before building a new one.
if (oldRepository != null) {
oldRepository.cleanup();
}
DistributedLockService lockService = getLockService();
String lockName = getLockName(fileAndChunkBucket);
// Poll for the lock (100 ms wait per attempt, no lease expiry); bail out if we lose
// primary status while waiting so we don't build a repository we can't own.
while (!lockService.lock(lockName, 100, -1)) {
if (!fileAndChunkBucket.getBucketAdvisor().isPrimary()) {
return null;
}
}
final IndexRepository repo;
try {
RegionDirectory dir = new RegionDirectory(getBucketTargetingMap(fileAndChunkBucket, bucketId), indexForPR.getFileSystemStats());
IndexWriterConfig config = new IndexWriterConfig(indexForPR.getAnalyzer());
IndexWriter writer = new IndexWriter(dir, config);
// The repository takes ownership of the lock (lockService + lockName) and of the writer.
repo = new IndexRepositoryImpl(fileAndChunkBucket, writer, serializer, indexForPR.getIndexStats(), dataBucket, lockService, lockName);
success = true;
return repo;
} catch (IOException e) {
logger.info("Exception thrown while constructing Lucene Index for bucket:" + bucketId + " for file region:" + fileAndChunkBucket.getFullPath());
throw e;
} finally {
// Release the lock only if construction failed; on success it is held by the repository.
if (!success) {
lockService.unlock(lockName);
}
}
}
Use of org.apache.geode.internal.cache.BucketRegion in the Apache Geode project.
Example from the class DistributedTransactionDUnitTest, method testConcurrentTXAndNonTXOperations.
/*
* Test to reproduce a scenario where: 1. On primary, the tx op is applied first followed by
* non-tx 2. On secondary, non-tx op is applied first followed by tx.
*/
@Ignore
@Test
/**
 * Reproduces an ordering race between a transactional and a non-transactional put on the
 * same key: on the primary the tx op is applied first followed by the non-tx op, while on
 * the secondary the non-tx op is applied first followed by the tx op. Hooks installed on
 * the secondary (internalBeforeApplyChanges / internalBeforeNonTXBasicPut) block each
 * operation until the test releases them in the inverted order.
 */
@Ignore
@Test
public void testConcurrentTXAndNonTXOperations() throws Exception {
  Host host = Host.getHost(0);
  final VM server1 = host.getVM(0);
  final VM server2 = host.getVM(1);
  // Create the PR on server1 only first, so the bucket's initial primary is deterministic.
  createPersistentPR(new VM[] { server1 });
  execute(server1, new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
      CustId custIdOne = new CustId(1);
      Customer customerOne = new Customer("name1", "addr1");
      prRegion.put(custIdOne, customerOne);
      BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custIdOne);
      String primaryMember = br.getBucketAdvisor().getPrimary().toString();
      getGemfireCache().getLoggerI18n().fine("TEST:PRIMARY:" + primaryMember);
      String memberId = getGemfireCache().getDistributedSystem().getMemberId();
      getGemfireCache().getLoggerI18n().fine("TEST:MEMBERID:" + memberId);
      return null;
    }
  });
  // Now bring up the second member so the bucket gets a secondary copy.
  createPersistentPR(new VM[] { server2 });
  // Determine which VM actually hosts the primary for the key's bucket.
  Boolean isPrimary = (Boolean) execute(server1, new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
      CustId custIdOne = new CustId(1);
      BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custIdOne);
      String primaryMember = br.getBucketAdvisor().getPrimary().toString();
      getGemfireCache().getLoggerI18n().fine("TEST:PRIMARY:" + primaryMember);
      String memberId = getGemfireCache().getDistributedSystem().getMemberId();
      getGemfireCache().getLoggerI18n().fine("TEST:MEMBERID:" + memberId);
      return memberId.equals(primaryMember);
    }
  });
  final VM primary = isPrimary.booleanValue() ? server1 : server2;
  final VM secondary = !isPrimary.booleanValue() ? server1 : server2;
  System.out.println("TEST:SERVER-1:VM-" + server1.getPid());
  System.out.println("TEST:SERVER-2:VM-" + server2.getPid());
  System.out.println("TEST:PRIMARY=VM-" + primary.getPid());
  System.out.println("TEST:SECONDARY=VM-" + secondary.getPid());
  // Hook installed on the secondary: run() blocks the intercepted operation on the latch
  // until release() is called by the test.
  class WaitRelease implements Runnable {
    CountDownLatch cdl;
    String op;
    public WaitRelease(CountDownLatch cdl, String op) {
      this.cdl = cdl;
      // Fix: the original constructor never assigned the op field, so every log line
      // below printed "null" instead of the operation label.
      this.op = op;
    }
    @Override
    public void run() {
      try {
        GemFireCacheImpl.getExisting().getLoggerI18n().fine("TEST:TX WAITING - " + op);
        cdl.await();
        GemFireCacheImpl.getExisting().getLoggerI18n().fine("TEST:TX END WAITING");
      } catch (InterruptedException e) {
        // Fix: restore the interrupt status instead of silently swallowing it.
        Thread.currentThread().interrupt();
      }
    }
    public void release() {
      GemFireCacheImpl.getExisting().getLoggerI18n().fine("TEST:TX COUNTDOWN - " + op);
      cdl.countDown();
    }
  }
  // Install TX hook
  SerializableCallable txHook = new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      CountDownLatch cdl = new CountDownLatch(1);
      DistTXState.internalBeforeApplyChanges = new WaitRelease(cdl, "TX OP");
      return null;
    }
  };
  execute(secondary, txHook);
  // Install non-TX hook
  SerializableCallable nontxHook = new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      CountDownLatch cdl = new CountDownLatch(1);
      DistTXState.internalBeforeNonTXBasicPut = new WaitRelease(cdl, "NON TX OP");
      return null;
    }
  };
  // Install the wait-release hook on the secondary
  execute(secondary, nontxHook);
  // Start a tx operation on primary
  execute(primary, new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      // The reason this is run in a separate thread instead of controller thread
      // is that this is going to block because the secondary is going to wait.
      new Thread() {
        public void run() {
          CacheTransactionManager mgr = getGemfireCache().getTxManager();
          mgr.setDistributed(true);
          getGemfireCache().getLoggerI18n().fine("TEST:DISTTX=" + mgr.isDistributed());
          mgr.begin();
          Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
          CustId custIdOne = new CustId(1);
          Customer customerOne = new Customer("name1_tx", "addr1");
          getGemfireCache().getLoggerI18n().fine("TEST:TX UPDATE");
          prRegion.put(custIdOne, customerOne);
          getGemfireCache().getLoggerI18n().fine("TEST:TX COMMIT");
          mgr.commit();
        }
      }.start();
      return null;
    }
  });
  // Let the TX op be applied on primary first.
  // Fix: sleep() is static — call it as Thread.sleep, not via Thread.currentThread().
  Thread.sleep(200);
  // Perform a non-tx op on the same key on primary
  execute(primary, new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
      CustId custIdOne = new CustId(1);
      Customer customerOne = new Customer("name1_nontx", "addr1");
      getGemfireCache().getLoggerI18n().fine("TEST:TX NONTXUPDATE");
      prRegion.put(custIdOne, customerOne);
      return null;
    }
  });
  // Wait for a few milliseconds
  Thread.sleep(200);
  // Release the waiting non-tx op first, on secondary (inverting the primary's order)
  execute(secondary, new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      Runnable r = DistTXState.internalBeforeNonTXBasicPut;
      assert (r != null && r instanceof WaitRelease);
      WaitRelease e = (WaitRelease) r;
      e.release();
      return null;
    }
  });
  // Now release the waiting commit on secondary
  execute(secondary, new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      Runnable r = DistTXState.internalBeforeApplyChanges;
      assert (r != null && r instanceof WaitRelease);
      WaitRelease e = (WaitRelease) r;
      e.release();
      return null;
    }
  });
  // Verify region and entry versions on primary and secondary
  SerializableCallable verifyPrimary = new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
      CustId custId = new CustId(1);
      Customer customer = prRegion.get(custId);
      BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custId);
      RegionEntry re = br.getRegionEntry(custId);
      getGemfireCache().getLoggerI18n().fine("TEST:TX PRIMARY CUSTOMER=" + customer);
      getGemfireCache().getLoggerI18n().fine("TEST:TX PRIMARY REGION VERSION=" + re.getVersionStamp().getRegionVersion());
      getGemfireCache().getLoggerI18n().fine("TEST:TX PRIMARY ENTRY VERSION=" + re.getVersionStamp().getEntryVersion());
      return null;
    }
  };
  execute(primary, verifyPrimary);
  SerializableCallable verifySecondary = new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
      CustId custId = new CustId(1);
      Customer customer = prRegion.get(custId);
      BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custId);
      RegionEntry re = br.getRegionEntry(custId);
      getGemfireCache().getLoggerI18n().fine("TEST:TX SECONDARY CUSTOMER=" + customer);
      getGemfireCache().getLoggerI18n().fine("TEST:TX SECONDARY REGION VERSION=" + re.getVersionStamp().getRegionVersion());
      getGemfireCache().getLoggerI18n().fine("TEST:TX SECONDARY ENTRY VERSION=" + re.getVersionStamp().getEntryVersion());
      return null;
    }
  };
  execute(secondary, verifySecondary);
}
Use of org.apache.geode.internal.cache.BucketRegion in the Apache Geode project.
Example from the class WANTestBase, method validateParallelSenderQueueAllBucketsDrained.
/**
 * Waits (up to 180 seconds per bucket) until every local primary bucket of the
 * given parallel gateway sender's queue region is empty. RegionDestroyedException
 * and ForceReattemptException are expected during draining and are suppressed.
 */
public static void validateParallelSenderQueueAllBucketsDrained(final String senderId) {
  IgnoredException exp = IgnoredException.addIgnoredException(RegionDestroyedException.class.getName());
  IgnoredException exp1 = IgnoredException.addIgnoredException(ForceReattemptException.class.getName());
  try {
    // Locate the sender whose queue we are validating.
    GatewaySender sender = null;
    for (GatewaySender candidate : cache.getGatewaySenders()) {
      if (!candidate.getId().equals(senderId)) {
        continue;
      }
      sender = candidate;
      break;
    }
    RegionQueue regionQueue = ((AbstractGatewaySender) sender).getQueues().toArray(new RegionQueue[1])[0];
    PartitionedRegion queueRegion = (PartitionedRegion) regionQueue.getRegion();
    // Every locally hosted primary bucket must eventually drain to zero entries.
    for (final BucketRegion bucket : queueRegion.getDataStore().getAllLocalPrimaryBucketRegions()) {
      Awaitility.await().atMost(180, TimeUnit.SECONDS).until(() -> {
        assertEquals("Expected bucket entries for bucket: " + bucket.getId() + " is: 0 but actual entries: " + bucket.keySet().size() + " This bucket isPrimary: " + bucket.getBucketAdvisor().isPrimary() + " KEYSET: " + bucket.keySet(), 0, bucket.keySet().size());
      });
    }
  } finally {
    exp.remove();
    exp1.remove();
  }
}
Use of org.apache.geode.internal.cache.BucketRegion in the Apache Geode project.
Example from the class WANTestBase, method validateParallelSenderQueueBucketSize.
/**
 * Asserts that every local primary bucket of the given parallel gateway sender's
 * queue region holds exactly {@code bucketSize} entries.
 */
public static void validateParallelSenderQueueBucketSize(final String senderId, final int bucketSize) {
  // Locate the sender whose queue we are validating.
  GatewaySender sender = null;
  for (GatewaySender candidate : cache.getGatewaySenders()) {
    if (!candidate.getId().equals(senderId)) {
      continue;
    }
    sender = candidate;
    break;
  }
  RegionQueue regionQueue = ((AbstractGatewaySender) sender).getQueues().toArray(new RegionQueue[1])[0];
  PartitionedRegion queueRegion = (PartitionedRegion) regionQueue.getRegion();
  // Each locally hosted primary bucket must contain the expected entry count.
  for (BucketRegion bucket : queueRegion.getDataStore().getAllLocalPrimaryBucketRegions()) {
    assertEquals("Expected bucket entries for bucket " + bucket.getId() + " is different than actual.", bucketSize, bucket.keySet().size());
  }
}
Aggregations