use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.
In the class PRBucketSynchronizationDUnitTest, the method createEntry2 applies a create and a tombstone directly to a BucketRegion, with version tags forged to look as if they came from the primary, so that a later delta-GII can be verified to transfer them.
private boolean createEntry2(VM vm, final InternalDistributedMember primary, final VersionSource primaryVersionID) {
return (Boolean) vm.invoke(new SerializableCallable("create entry2") {
public Object call() {
// create a fake event that looks like it came from the primary and apply it to
// this cache
PartitionedRegion pr = (PartitionedRegion) TestRegion;
BucketRegion bucket = pr.getDataStore().getLocalBucketById(0);
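// getLocalBucketById returns the BucketRegion this member hosts for bucket 0, or null if the bucket is not hosted here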
VersionTag tag = new VMVersionTag();
tag.setMemberID(primaryVersionID);
tag.setRegionVersion(2);
tag.setEntryVersion(1);
tag.setIsRemoteForTesting();
EntryEventImpl event = EntryEventImpl.create(bucket, Operation.CREATE, "Object3", true, primary, true, false);
LogWriterUtils.getLogWriter().info("applying this event to the cache: " + event);
event.setNewValue(new VMCachedDeserializable("value3", 12));
event.setVersionTag(tag);
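// apply the forged event straight to the bucket's RegionMap, bypassing the normal distributed put path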
bucket.getRegionMap().basicPut(event, System.currentTimeMillis(), true, false, null, false, false);
event.release();
// now create a tombstone so we can be sure these are transferred in delta-GII
tag = new VMVersionTag();
tag.setMemberID(primaryVersionID);
tag.setRegionVersion(3);
tag.setEntryVersion(1);
tag.setIsRemoteForTesting();
event = EntryEventImpl.create(bucket, Operation.CREATE, "Object5", true, primary, true, false);
event.setNewValue(Token.TOMBSTONE);
event.setVersionTag(tag);
LogWriterUtils.getLogWriter().info("applying this event to the cache: " + event);
bucket.getRegionMap().basicPut(event, System.currentTimeMillis(), true, false, null, false, false);
event.release();
bucket.dumpBackingMap();
LogWriterUtils.getLogWriter().info("bucket version vector is now " + bucket.getVersionVector().fullToString());
assertTrue("bucket should hold entry Object3 now", bucket.containsKey("Object3"));
return true;
}
});
}
use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.
In the class DistributedTransactionDUnitTest, the method testRegionAndEntryVersionsPR checks that the region version on a BucketRegion and the version stamp on its RegionEntry advance with each distributed-transaction commit, on both the committing member and its peer.
@Test
public void testRegionAndEntryVersionsPR() throws Exception {
Host host = Host.getHost(0);
VM server1 = host.getVM(0);
VM server2 = host.getVM(1);
createPersistentPR(new VM[] { server1, server2 });
execute(server2, new SerializableCallable() {
@Override
public Object call() throws Exception {
CacheTransactionManager mgr = getGemfireCache().getTxManager();
mgr.setDistributed(true);
getGemfireCache().getLoggerI18n().fine("TEST:DISTTX=" + mgr.isDistributed());
getGemfireCache().getLoggerI18n().fine("TEST:TX BEGIN");
mgr.begin();
Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
CustId custIdOne = new CustId(1);
Customer customerOne = new Customer("name1", "addr1");
getGemfireCache().getLoggerI18n().fine("TEST:TX PUT 1");
prRegion.put(custIdOne, customerOne);
BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custIdOne);
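// getBucketRegion(key) returns the locally hosted BucketRegion for the bucket the key maps to
// the put above is still inside the open transaction, so the bucket's region version is expected to still be 0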
assertEquals(0L, br.getVersionVector().getCurrentVersion());
getGemfireCache().getLoggerI18n().fine("TEST:TX COMMIT 1");
mgr.commit();
// Verify region version on the region
assertEquals(1L, br.getVersionVector().getCurrentVersion());
RegionEntry re = br.getRegionEntry(custIdOne);
getGemfireCache().getLoggerI18n().fine("TEST:VERSION-STAMP:" + re.getVersionStamp());
// Verify region version on the region entry
assertEquals(1L, re.getVersionStamp().getRegionVersion());
// Verify entry version
assertEquals(1, re.getVersionStamp().getEntryVersion());
mgr.begin();
prRegion.put(custIdOne, new Customer("name1_1", "addr1"));
getGemfireCache().getLoggerI18n().fine("TEST:TX COMMIT 2");
assertEquals(1L, br.getVersionVector().getCurrentVersion());
mgr.commit();
// Verify region version on the region
assertEquals(2L, br.getVersionVector().getCurrentVersion());
re = br.getRegionEntry(custIdOne);
getGemfireCache().getLoggerI18n().fine("TEST:VERSION-STAMP:" + re.getVersionStamp());
// Verify region version on the region entry
assertEquals(2L, re.getVersionStamp().getRegionVersion());
// Verify entry version
assertEquals(2, re.getVersionStamp().getEntryVersion());
return null;
}
});
execute(server1, new SerializableCallable() {
@Override
public Object call() throws Exception {
Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
CustId custIdOne = new CustId(1);
BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custIdOne);
// Verify region version on the region
assertEquals(2L, br.getVersionVector().getCurrentVersion());
// Verify region version on the region entry
RegionEntry re = br.getRegionEntry(custIdOne);
assertEquals(2L, re.getVersionStamp().getRegionVersion());
// Verify entry version
assertEquals(2, re.getVersionStamp().getEntryVersion());
return null;
}
});
}
use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.
In the class AbstractPartitionedRepositoryManager, the method getRepositories resolves the local BucketRegion for every bucket in the function context and collects the corresponding Lucene IndexRepository for each one.
@Override
public Collection<IndexRepository> getRepositories(RegionFunctionContext ctx) throws BucketNotFoundException {
Region<Object, Object> region = ctx.getDataSet();
Set<Integer> buckets = ((InternalRegionFunctionContext) ctx).getLocalBucketSet(region);
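// the function context supplies the bucket ids that are local to this member for this execution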
ArrayList<IndexRepository> repos = new ArrayList<IndexRepository>(buckets.size());
for (Integer bucketId : buckets) {
BucketRegion userBucket = userRegion.getDataStore().getLocalBucketById(bucketId);
if (userBucket == null) {
throw new BucketNotFoundException("User bucket was not found for region " + region + " bucket id " + bucketId);
} else {
repos.add(getRepository(userBucket.getId()));
}
}
return repos;
}
use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.
In the class IndexRepositoryFactory, the method computeIndexRepository builds an IndexRepository over the file-and-chunk BucketRegion of a Lucene index, and only does so while this member is primary for the bucket and holds a distributed lock for it.
public IndexRepository computeIndexRepository(final Integer bucketId, LuceneSerializer serializer, LuceneIndexImpl index, PartitionedRegion userRegion, final IndexRepository oldRepository) throws IOException {
LuceneIndexForPartitionedRegion indexForPR = (LuceneIndexForPartitionedRegion) index;
final PartitionedRegion fileRegion = indexForPR.getFileAndChunkRegion();
BucketRegion fileAndChunkBucket = getMatchingBucket(fileRegion, bucketId);
BucketRegion dataBucket = getMatchingBucket(userRegion, bucketId);
boolean success = false;
if (fileAndChunkBucket == null) {
if (oldRepository != null) {
oldRepository.cleanup();
}
return null;
}
if (!fileAndChunkBucket.getBucketAdvisor().isPrimary()) {
if (oldRepository != null) {
oldRepository.cleanup();
}
return null;
}
if (oldRepository != null && !oldRepository.isClosed()) {
return oldRepository;
}
if (oldRepository != null) {
oldRepository.cleanup();
}
DistributedLockService lockService = getLockService();
String lockName = getLockName(fileAndChunkBucket);
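// retry the distributed lock in 100 ms attempts (a lease of -1 never expires); stop retrying if this member loses primary status for the bucket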
while (!lockService.lock(lockName, 100, -1)) {
if (!fileAndChunkBucket.getBucketAdvisor().isPrimary()) {
return null;
}
}
final IndexRepository repo;
try {
RegionDirectory dir = new RegionDirectory(getBucketTargetingMap(fileAndChunkBucket, bucketId), indexForPR.getFileSystemStats());
IndexWriterConfig config = new IndexWriterConfig(indexForPR.getAnalyzer());
IndexWriter writer = new IndexWriter(dir, config);
repo = new IndexRepositoryImpl(fileAndChunkBucket, writer, serializer, indexForPR.getIndexStats(), dataBucket, lockService, lockName);
success = true;
return repo;
} catch (IOException e) {
logger.info("Exception thrown while constructing Lucene Index for bucket:" + bucketId + " for file region:" + fileAndChunkBucket.getFullPath());
throw e;
} finally {
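// unlock only if repository construction failed; on success the lock stays held (lockService and lockName were handed to the repository above)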
if (!success) {
lockService.unlock(lockName);
}
}
}
use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.
In the class DistributedTransactionDUnitTest, the method testConcurrentTXAndNonTXOperations uses latched test hooks to force the tx and non-tx operations to be applied in opposite orders on the primary and the secondary.
/*
 * Test to reproduce a scenario where:
 * 1. On primary, the tx op is applied first, followed by non-tx.
 * 2. On secondary, non-tx op is applied first, followed by tx.
 */
@Ignore
@Test
public void testConcurrentTXAndNonTXOperations() throws Exception {
Host host = Host.getHost(0);
final VM server1 = host.getVM(0);
final VM server2 = host.getVM(1);
createPersistentPR(new VM[] { server1 });
execute(server1, new SerializableCallable() {
@Override
public Object call() throws Exception {
Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
CustId custIdOne = new CustId(1);
Customer customerOne = new Customer("name1", "addr1");
prRegion.put(custIdOne, customerOne);
BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custIdOne);
String primaryMember = br.getBucketAdvisor().getPrimary().toString();
getGemfireCache().getLoggerI18n().fine("TEST:PRIMARY:" + primaryMember);
String memberId = getGemfireCache().getDistributedSystem().getMemberId();
getGemfireCache().getLoggerI18n().fine("TEST:MEMBERID:" + memberId);
return null;
}
});
createPersistentPR(new VM[] { server2 });
Boolean isPrimary = (Boolean) execute(server1, new SerializableCallable() {
@Override
public Object call() throws Exception {
Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
CustId custIdOne = new CustId(1);
BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custIdOne);
String primaryMember = br.getBucketAdvisor().getPrimary().toString();
getGemfireCache().getLoggerI18n().fine("TEST:PRIMARY:" + primaryMember);
String memberId = getGemfireCache().getDistributedSystem().getMemberId();
getGemfireCache().getLoggerI18n().fine("TEST:MEMBERID:" + memberId);
return memberId.equals(primaryMember);
}
});
final VM primary = isPrimary.booleanValue() ? server1 : server2;
final VM secondary = !isPrimary.booleanValue() ? server1 : server2;
System.out.println("TEST:SERVER-1:VM-" + server1.getPid());
System.out.println("TEST:SERVER-2:VM-" + server2.getPid());
System.out.println("TEST:PRIMARY=VM-" + primary.getPid());
System.out.println("TEST:SECONDARY=VM-" + secondary.getPid());
class WaitRelease implements Runnable {
CountDownLatch cdl;
String op;
public WaitRelease(CountDownLatch cdl, String op) {
this.cdl = cdl;
this.op = op;
}
@Override
public void run() {
try {
GemFireCacheImpl.getExisting().getLoggerI18n().fine("TEST:TX WAITING - " + op);
cdl.await();
GemFireCacheImpl.getExisting().getLoggerI18n().fine("TEST:TX END WAITING");
} catch (InterruptedException e) {
}
}
public void release() {
GemFireCacheImpl.getExisting().getLoggerI18n().fine("TEST:TX COUNTDOWN - " + op);
cdl.countDown();
}
}
// Install TX hook
SerializableCallable txHook = new SerializableCallable() {
@Override
public Object call() throws Exception {
CountDownLatch cdl = new CountDownLatch(1);
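// internalBeforeApplyChanges runs just before the distributed tx applies its changes, so the secondary's commit parks on the latch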
DistTXState.internalBeforeApplyChanges = new WaitRelease(cdl, "TX OP");
return null;
}
};
execute(secondary, txHook);
// Install non-TX hook
SerializableCallable nontxHook = new SerializableCallable() {
@Override
public Object call() throws Exception {
CountDownLatch cdl = new CountDownLatch(1);
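// internalBeforeNonTXBasicPut is the matching hook on the non-transactional put path, so the secondary's non-tx put parks on its own latch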
DistTXState.internalBeforeNonTXBasicPut = new WaitRelease(cdl, "NON TX OP");
return null;
}
};
// Install the wait-release hook on the secondary
execute(secondary, nontxHook);
// Start a tx operation on primary
execute(primary, new SerializableCallable() {
@Override
public Object call() throws Exception {
// The reason this is run in a separate thread instead of controller thread
// is that this is going to block because the secondary is going to wait.
new Thread() {
public void run() {
CacheTransactionManager mgr = getGemfireCache().getTxManager();
mgr.setDistributed(true);
getGemfireCache().getLoggerI18n().fine("TEST:DISTTX=" + mgr.isDistributed());
mgr.begin();
Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
CustId custIdOne = new CustId(1);
Customer customerOne = new Customer("name1_tx", "addr1");
getGemfireCache().getLoggerI18n().fine("TEST:TX UPDATE");
prRegion.put(custIdOne, customerOne);
getGemfireCache().getLoggerI18n().fine("TEST:TX COMMIT");
mgr.commit();
}
}.start();
return null;
}
});
// Let the TX op be applied on primary first
Thread.sleep(200);
// Perform a non-tx op on the same key on primary
execute(primary, new SerializableCallable() {
@Override
public Object call() throws Exception {
Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
CustId custIdOne = new CustId(1);
Customer customerOne = new Customer("name1_nontx", "addr1");
getGemfireCache().getLoggerI18n().fine("TEST:TX NONTXUPDATE");
prRegion.put(custIdOne, customerOne);
return null;
}
});
// Wait for a few milliseconds
Thread.sleep(200);
// Release the waiting non-tx op first, on secondary
execute(secondary, new SerializableCallable() {
@Override
public Object call() throws Exception {
Runnable r = DistTXState.internalBeforeNonTXBasicPut;
assert (r != null && r instanceof WaitRelease);
WaitRelease e = (WaitRelease) r;
e.release();
return null;
}
});
// Now release the waiting commit on secondary
execute(secondary, new SerializableCallable() {
@Override
public Object call() throws Exception {
Runnable r = DistTXState.internalBeforeApplyChanges;
assert (r != null && r instanceof WaitRelease);
WaitRelease e = (WaitRelease) r;
e.release();
return null;
}
});
// Verify region and entry versions on primary and secondary
SerializableCallable verifyPrimary = new SerializableCallable() {
@Override
public Object call() throws Exception {
Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
CustId custId = new CustId(1);
Customer customer = prRegion.get(custId);
BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custId);
RegionEntry re = br.getRegionEntry(custId);
getGemfireCache().getLoggerI18n().fine("TEST:TX PRIMARY CUSTOMER=" + customer);
getGemfireCache().getLoggerI18n().fine("TEST:TX PRIMARY REGION VERSION=" + re.getVersionStamp().getRegionVersion());
getGemfireCache().getLoggerI18n().fine("TEST:TX PRIMARY ENTRY VERSION=" + re.getVersionStamp().getEntryVersion());
return null;
}
};
execute(primary, verifyPrimary);
SerializableCallable verifySecondary = new SerializableCallable() {
@Override
public Object call() throws Exception {
Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
CustId custId = new CustId(1);
Customer customer = prRegion.get(custId);
BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custId);
RegionEntry re = br.getRegionEntry(custId);
getGemfireCache().getLoggerI18n().fine("TEST:TX SECONDARY CUSTOMER=" + customer);
getGemfireCache().getLoggerI18n().fine("TEST:TX SECONDARY REGION VERSION=" + re.getVersionStamp().getRegionVersion());
getGemfireCache().getLoggerI18n().fine("TEST:TX SECONDARY ENTRY VERSION=" + re.getVersionStamp().getEntryVersion());
return null;
}
};
execute(secondary, verifySecondary);
}