Example usage of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache, taken from the class MultiRegionFunctionExecutionDUnitTest, method createRegionsOnVm1.
/**
 * Creates the regions used by this test on VM1: partitioned region "PR1"
 * (12 buckets, redundancy 1), plus "RR1" with DataPolicy.NORMAL and "RR2"
 * with DataPolicy.REPLICATE.
 */
@SuppressWarnings("unchecked")
public static void createRegionsOnVm1() {
  new MultiRegionFunctionExecutionDUnitTest().createCache();

  // Partitioned region PR1.
  PartitionAttributesFactory partitionFactory = new PartitionAttributesFactory();
  partitionFactory.setTotalNumBuckets(12);
  partitionFactory.setRedundantCopies(1);
  AttributesFactory regionFactory = new AttributesFactory();
  regionFactory.setPartitionAttributes(partitionFactory.create());
  cache.createRegion("PR1", regionFactory.create());

  // RR1: NORMAL data policy.
  regionFactory = new AttributesFactory();
  regionFactory.setDataPolicy(DataPolicy.NORMAL);
  cache.createRegion("RR1", regionFactory.create());

  // RR2: REPLICATE data policy.
  regionFactory = new AttributesFactory();
  regionFactory.setDataPolicy(DataPolicy.REPLICATE);
  cache.createRegion("RR2", regionFactory.create());
}
Example usage of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache, taken from the class MultiRegionFunctionExecutionDUnitTest, method createRegionsOnVm2.
/**
 * Creates the regions used by this test on VM2: "PR1" as an accessor
 * (localMaxMemory 0, presumably so this member stores no buckets — matches
 * the accessor usage elsewhere in this file), data-hosting "PR2", "RR1"
 * with DataPolicy.EMPTY, and "RR2" with DataPolicy.NORMAL.
 */
@SuppressWarnings("unchecked")
public static void createRegionsOnVm2() {
  new MultiRegionFunctionExecutionDUnitTest().createCache();

  // PR1: accessor — local max memory of 0.
  PartitionAttributesFactory partitionFactory = new PartitionAttributesFactory();
  partitionFactory.setTotalNumBuckets(12);
  partitionFactory.setRedundantCopies(1);
  partitionFactory.setLocalMaxMemory(0);
  AttributesFactory regionFactory = new AttributesFactory();
  regionFactory.setPartitionAttributes(partitionFactory.create());
  cache.createRegion("PR1", regionFactory.create());

  // PR2: regular data-hosting partitioned region.
  partitionFactory = new PartitionAttributesFactory();
  partitionFactory.setTotalNumBuckets(12);
  partitionFactory.setRedundantCopies(1);
  regionFactory = new AttributesFactory();
  regionFactory.setPartitionAttributes(partitionFactory.create());
  cache.createRegion("PR2", regionFactory.create());

  // RR1: EMPTY data policy.
  regionFactory = new AttributesFactory();
  regionFactory.setDataPolicy(DataPolicy.EMPTY);
  cache.createRegion("RR1", regionFactory.create());

  // RR2: NORMAL data policy.
  regionFactory = new AttributesFactory();
  regionFactory.setDataPolicy(DataPolicy.NORMAL);
  cache.createRegion("RR2", regionFactory.create());
}
Example usage of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache, taken from the class MultiRegionFunctionExecutionDUnitTest, method createRegionsOnVm3.
/**
 * Creates the regions used by this test on VM3: data-hosting partitioned
 * regions "PR1" and "PR2" (12 buckets, redundancy 1 each), plus "RR1" and
 * "RR2", both with DataPolicy.REPLICATE.
 */
@SuppressWarnings("unchecked")
public static void createRegionsOnVm3() {
  new MultiRegionFunctionExecutionDUnitTest().createCache();

  // PR1.
  PartitionAttributesFactory partitionFactory = new PartitionAttributesFactory();
  partitionFactory.setTotalNumBuckets(12);
  partitionFactory.setRedundantCopies(1);
  AttributesFactory regionFactory = new AttributesFactory();
  regionFactory.setPartitionAttributes(partitionFactory.create());
  cache.createRegion("PR1", regionFactory.create());

  // PR2: same partition configuration as PR1.
  partitionFactory = new PartitionAttributesFactory();
  partitionFactory.setTotalNumBuckets(12);
  partitionFactory.setRedundantCopies(1);
  regionFactory = new AttributesFactory();
  regionFactory.setPartitionAttributes(partitionFactory.create());
  cache.createRegion("PR2", regionFactory.create());

  // RR1 and RR2: both REPLICATE.
  regionFactory = new AttributesFactory();
  regionFactory.setDataPolicy(DataPolicy.REPLICATE);
  cache.createRegion("RR1", regionFactory.create());

  regionFactory = new AttributesFactory();
  regionFactory.setDataPolicy(DataPolicy.REPLICATE);
  cache.createRegion("RR2", regionFactory.create());
}
Example usage of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache, taken from the class PRFunctionExecutionWithResultSenderDUnitTest, method createColoRegionAttrs.
/**
 * Builds region attributes for a partitioned region colocated with another
 * region, using a {@link TestResolver} as the partition resolver.
 *
 * @param red number of redundant copies for the partitioned region
 * @param mem local max memory setting (0 presumably makes this member an
 *        accessor — TODO confirm against the Geode API docs)
 * @param coloRegion name of the region to colocate with
 * @return the assembled {@link RegionAttributes}
 */
private RegionAttributes createColoRegionAttrs(int red, int mem, String coloRegion) {
  final TestResolver resolver = new TestResolver();
  // Same configuration as the original fluent chain, spelled out stepwise.
  PartitionAttributesFactory partitionFactory = new PartitionAttributesFactory();
  partitionFactory.setPartitionResolver(resolver);
  partitionFactory.setRedundantCopies(red);
  partitionFactory.setLocalMaxMemory(mem);
  partitionFactory.setColocatedWith(coloRegion);
  AttributesFactory attr = new AttributesFactory();
  attr.setPartitionAttributes(partitionFactory.create());
  return attr.create();
}
Example usage of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache, taken from the class Bug39356DUnitTest, method testCrashWhileCreatingABucket.
/**
 * This tests the case where the VM forcing other VMs to create a bucket crashes while creating
 * the bucket.
 *
 * Setup: vm1 and vm2 host the partitioned region data (redundancy 1); vm0 is
 * an accessor (localMaxMemory 0). A put from vm0 triggers bucket creation,
 * during which the installed observer is expected to close vm0's cache (the
 * observer's behavior lives in MyRegionObserver — not visible here). The test
 * then verifies every non-empty bucket still has two copies across vm1/vm2.
 */
@Test
public void testCrashWhileCreatingABucket() {
Host host = Host.getHost(0);
final VM vm0 = host.getVM(0);
final VM vm1 = host.getVM(1);
final VM vm2 = host.getVM(2);
// Runnable for the two data-hosting members (vm1, vm2).
SerializableRunnable createParReg = new SerializableRunnable("Create parReg") {
public void run() {
// Observer parameterized with vm0; presumably it reacts to bucket-creation
// messages by killing vm0's cache — see the CancelException expectation below.
DistributionMessageObserver.setInstance(new MyRegionObserver(vm0));
Cache cache = getCache();
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory pf = new PartitionAttributesFactory();
pf.setRedundantCopies(1);
// Recovery delay 0 — NOTE(review): presumably so redundancy is restored
// immediately after the crash; confirm against setRecoveryDelay docs.
pf.setRecoveryDelay(0);
af.setDataPolicy(DataPolicy.PARTITION);
af.setPartitionAttributes(pf.create());
cache.createRegion(REGION_NAME, af.create());
}
};
vm1.invoke(createParReg);
vm2.invoke(createParReg);
// Runnable for the accessor member (vm0): localMaxMemory 0 means it holds no
// bucket data itself, so its put forces bucket creation on vm1/vm2.
SerializableRunnable createParRegAccessor = new SerializableRunnable("Create parReg") {
public void run() {
Cache cache = getCache();
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory pf = new PartitionAttributesFactory();
pf.setRedundantCopies(1);
pf.setLocalMaxMemory(0);
af.setDataPolicy(DataPolicy.PARTITION);
af.setPartitionAttributes(pf.create());
Region r = cache.createRegion(REGION_NAME, af.create());
// trigger the creation of a bucket, which should trigger the destruction of this VM.
try {
r.put("ping", "pong");
fail("Should have gotten a CancelException");
} catch (CancelException e) {
// this is ok, we expect our observer to close this cache.
}
}
};
vm0.invoke(createParRegAccessor);
// After the simulated crash, every bucket that holds data must still have
// exactly two copies (redundancy 1 => primary + one redundant).
SerializableRunnable verifyBuckets = new SerializableRunnable("Verify buckets") {
public void run() {
LogWriter log = org.apache.geode.test.dunit.LogWriterUtils.getLogWriter();
Cache cache = getCache();
PartitionedRegion r = (PartitionedRegion) cache.getRegion(REGION_NAME);
for (int i = 0; i < r.getAttributes().getPartitionAttributes().getTotalNumBuckets(); i++) {
List owners = null;
// Retry until the bucket's owner list can be read; ForceReattemptException
// is treated as transient (e.g. while redundancy recovery is in flight).
while (owners == null) {
try {
owners = r.getBucketOwnersForValidation(i);
} catch (ForceReattemptException e) {
log.info(Bug39356DUnitTest.class + " verify buckets Caught a ForceReattemptException");
Wait.pause(1000);
}
}
// Buckets that were never populated have no owners to validate.
if (owners.isEmpty()) {
log.info("skipping bucket " + i + " because it has no data");
continue;
}
assertEquals("Expecting bucket " + i + " to have two copies", 2, owners.size());
log.info("bucket " + i + " had two copies");
}
}
};
vm1.invoke(verifyBuckets);
vm2.invoke(verifyBuckets);
}
Aggregations