Use of org.apache.geode.cache.PartitionAttributes in project geode by apache.
Class PRClientServerTestBase, method createCacheServerWith2Regions:
public static Integer createCacheServerWith2Regions(ArrayList commonAttributes, Integer localMaxMemory) throws Exception {
  AttributesFactory factory = new AttributesFactory();
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setPartitionResolver((PartitionResolver) commonAttributes.get(1));
  paf.setRedundantCopies(((Integer) commonAttributes.get(2)).intValue());
  paf.setTotalNumBuckets(((Integer) commonAttributes.get(3)).intValue());
  paf.setColocatedWith((String) commonAttributes.get(4));
  paf.setLocalMaxMemory(localMaxMemory.intValue());
  PartitionAttributes partitionAttributes = paf.create();
  factory.setDataPolicy(DataPolicy.PARTITION);
  factory.setPartitionAttributes(partitionAttributes);
  RegionAttributes attrs = factory.create();
  Region region1 = cache.createRegion(PartitionedRegionName + "1", attrs);
  assertNotNull(region1);
  Region region2 = cache.createRegion(PartitionedRegionName + "2", attrs);
  assertNotNull(region2);
  CacheServer server1 = cache.addCacheServer();
  assertNotNull(server1);
  int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
  server1.setPort(port);
  server1.start();
  assertTrue(server1.isRunning());
  return new Integer(server1.getPort());
}
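The method above drives the AttributesFactory API, which Geode has deprecated in favor of RegionFactory. As a rough standalone sketch (not taken from the test base), the same kind of partitioned-region configuration can be expressed with RegionFactory; the class and method names, bucket count, memory limit, and region name below are illustrative placeholders.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.PartitionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class PartitionedRegionSketch {
  public static Region<Object, Object> createPartitionedRegion(Cache cache) {
    PartitionAttributes<Object, Object> attrs = new PartitionAttributesFactory<Object, Object>()
        .setRedundantCopies(1)   // keep one backup copy of every bucket
        .setTotalNumBuckets(13)  // illustrative bucket count
        .setLocalMaxMemory(50)   // megabytes of local heap this member devotes to buckets
        .create();
    return cache.<Object, Object>createRegionFactory(RegionShortcut.PARTITION)
        .setPartitionAttributes(attrs)
        .create("examplePartitionedRegion");
  }
}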
Use of org.apache.geode.cache.PartitionAttributes in project geode by apache.
Class PRClientServerTestBase, method createSelectorCacheServer:
public static Integer createSelectorCacheServer(ArrayList commonAttributes, Integer localMaxMemory) throws Exception {
  AttributesFactory factory = new AttributesFactory();
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setPartitionResolver((PartitionResolver) commonAttributes.get(1));
  paf.setRedundantCopies(((Integer) commonAttributes.get(2)).intValue());
  paf.setTotalNumBuckets(((Integer) commonAttributes.get(3)).intValue());
  paf.setColocatedWith((String) commonAttributes.get(4));
  paf.setLocalMaxMemory(localMaxMemory.intValue());
  PartitionAttributes partitionAttributes = paf.create();
  factory.setDataPolicy(DataPolicy.PARTITION);
  factory.setPartitionAttributes(partitionAttributes);
  RegionAttributes attrs = factory.create();
  Region region = cache.createRegion((String) commonAttributes.get(0), attrs);
  assertNotNull(region);
  CacheServer server1 = cache.addCacheServer();
  assertNotNull(server1);
  int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
  server1.setPort(port);
  server1.setMaxThreads(16);
  server1.start();
  assertTrue(server1.isRunning());
  return new Integer(server1.getPort());
}
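The selector variant differs from the previous method mainly in how the cache server is configured: setMaxThreads(16) caps the number of threads servicing client connections instead of dedicating one thread per connection. A minimal sketch of that server setup, assuming the common pattern of passing port 0 so the server binds to a free port (the helper class, method name, and parameter are illustrative, not from the test base):

import java.io.IOException;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.server.CacheServer;

public class CacheServerSketch {
  // Starts a cache server and returns the port it actually bound to.
  public static int startSelectorCacheServer(Cache cache, int maxThreads) throws IOException {
    CacheServer server = cache.addCacheServer();
    server.setPort(0);                // let the server pick an available port
    server.setMaxThreads(maxThreads); // a value > 0 bounds the client-connection thread pool
    server.start();
    return server.getPort();          // the port chosen at start()
  }
}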
Use of org.apache.geode.cache.PartitionAttributes in project geode by apache.
Class GemfireDataCommandsDUnitTest, method setupForGetPutRemoveLocateEntry:
void setupForGetPutRemoveLocateEntry(String testName) {
  final VM vm1 = Host.getHost(0).getVM(1);
  final VM vm2 = Host.getHost(0).getVM(2);
  Properties props = new Properties();
  props.setProperty(NAME, testName + "Manager");
  HeadlessGfsh gfsh = setUpJmxManagerOnVm0ThenConnect(props);
  assertNotNull(gfsh);
  assertEquals(true, gfsh.isConnectedAndReady());
  vm1.invoke(new SerializableRunnable() {
    public void run() {
      InternalCache cache = getCache();
      RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
      Region dataRegion = regionFactory.create(DATA_REGION_NAME);
      assertNotNull(dataRegion);
      getLogWriter().info("Created Region " + dataRegion);
      dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1, dataRegion.getAttributes());
      assertNotNull(dataRegion);
      getLogWriter().info("Created Region " + dataRegion);
      dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1_2, dataRegion.getAttributes());
      assertNotNull(dataRegion);
      getLogWriter().info("Created Region " + dataRegion);
      dataRegion = regionFactory.create(DATA_REGION_NAME_VM1);
      assertNotNull(dataRegion);
      getLogWriter().info("Created Region " + dataRegion);
      PartitionAttributes partitionAttrs = new PartitionAttributesFactory().setRedundantCopies(2).create();
      RegionFactory<Object, Object> partitionRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
      partitionRegionFactory.setPartitionAttributes(partitionAttrs);
      Region dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME);
      assertNotNull(dataParRegion);
      getLogWriter().info("Created Region " + dataParRegion);
      dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME_VM1);
      assertNotNull(dataParRegion);
      getLogWriter().info("Created Region " + dataParRegion);
    }
  });
  vm2.invoke(new SerializableRunnable() {
    public void run() {
      InternalCache cache = getCache();
      RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
      Region dataRegion = regionFactory.create(DATA_REGION_NAME);
      assertNotNull(dataRegion);
      getLogWriter().info("Created Region " + dataRegion);
      dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1, dataRegion.getAttributes());
      assertNotNull(dataRegion);
      getLogWriter().info("Created Region " + dataRegion);
      dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1_2, dataRegion.getAttributes());
      assertNotNull(dataRegion);
      getLogWriter().info("Created Region " + dataRegion);
      dataRegion = regionFactory.create(DATA_REGION_NAME_VM2);
      assertNotNull(dataRegion);
      getLogWriter().info("Created Region " + dataRegion);
      PartitionAttributes partitionAttrs = new PartitionAttributesFactory().setRedundantCopies(2).create();
      RegionFactory<Object, Object> partitionRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
      partitionRegionFactory.setPartitionAttributes(partitionAttrs);
      Region dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME);
      assertNotNull(dataParRegion);
      getLogWriter().info("Created Region " + dataParRegion);
      dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME_VM2);
      assertNotNull(dataParRegion);
      getLogWriter().info("Created Region " + dataParRegion);
    }
  });
  final String vm1MemberId = vm1.invoke(() -> getMemberId());
  final String vm2MemberId = vm2.invoke(() -> getMemberId());
  getLogWriter().info("Vm1 ID : " + vm1MemberId);
  getLogWriter().info("Vm2 ID : " + vm2MemberId);
  final VM manager = Host.getHost(0).getVM(0);
  SerializableRunnable checkRegionMBeans = new SerializableRunnable() {
    @Override
    public void run() {
      InternalCache cache = getCache();
      final ManagementService service = ManagementService.getManagementService(cache);
      final WaitCriterion waitForManagerMBean = new WaitCriterion() {
        @Override
        public boolean done() {
          ManagerMXBean bean1 = service.getManagerMXBean();
          DistributedRegionMXBean bean2 = service.getDistributedRegionMXBean(DATA_REGION_NAME_PATH);
          if (bean1 == null) {
            getLogWriter().info("Still probing for ManagerMBean");
            return false;
          } else {
            getLogWriter().info("Still probing for DistributedRegionMXBean=" + bean2);
            return (bean2 != null);
          }
        }

        @Override
        public String description() {
          return "Probing for ManagerMBean";
        }
      };
      waitForCriterion(waitForManagerMBean, 30000, 2000, true);
      assertNotNull(service.getMemberMXBean());
      assertNotNull(service.getManagerMXBean());
      DistributedRegionMXBean bean = service.getDistributedRegionMXBean(DATA_REGION_NAME_PATH);
      assertNotNull(bean);
      WaitCriterion waitForRegionMBeans = new WaitCriterion() {
        @Override
        public boolean done() {
          DistributedRegionMXBean[] beans = new DistributedRegionMXBean[6];
          beans[0] = service.getDistributedRegionMXBean(DATA_REGION_NAME_PATH);
          beans[1] = service.getDistributedRegionMXBean(DATA_REGION_NAME_VM1_PATH);
          beans[2] = service.getDistributedRegionMXBean(DATA_REGION_NAME_VM2_PATH);
          beans[3] = service.getDistributedRegionMXBean(DATA_PAR_REGION_NAME_PATH);
          beans[4] = service.getDistributedRegionMXBean(DATA_PAR_REGION_NAME_VM1_PATH);
          beans[5] = service.getDistributedRegionMXBean(DATA_PAR_REGION_NAME_VM2_PATH);
          // SubRegion bug: proxy creation has some issues, so the subregion MBeans are not checked.
          // beans[6] = service.getDistributedRegionMXBean(DATA_REGION_NAME_CHILD_1_PATH);
          // beans[7] = service.getDistributedRegionMXBean(DATA_REGION_NAME_CHILD_1_2_PATH);
          boolean flag = true;
          for (DistributedRegionMXBean b : beans) {
            if (b == null) {
              flag = false;
              break;
            }
          }
          if (!flag) {
            getLogWriter().info("Still probing for regionMbeans " + DATA_REGION_NAME_PATH + "=" + beans[0]
                + " " + DATA_REGION_NAME_VM1_PATH + "=" + beans[1] + " " + DATA_REGION_NAME_VM2_PATH + "=" + beans[2]
                + " " + DATA_PAR_REGION_NAME_PATH + "=" + beans[3] + " " + DATA_PAR_REGION_NAME_VM1_PATH + "=" + beans[4]
                + " " + DATA_PAR_REGION_NAME_VM2_PATH + "=" + beans[5] + " ");
            return false;
          } else {
            getLogWriter().info("Probing complete for regionMbeans " + DATA_REGION_NAME_PATH + "=" + beans[0]
                + " " + DATA_REGION_NAME_VM1_PATH + "=" + beans[1] + " " + DATA_REGION_NAME_VM2_PATH + "=" + beans[2]
                + " " + DATA_PAR_REGION_NAME_PATH + "=" + beans[3] + " " + DATA_PAR_REGION_NAME_VM1_PATH + "=" + beans[4]
                + " " + DATA_PAR_REGION_NAME_VM2_PATH + "=" + beans[5] + " ");
            return true;
          }
        }

        @Override
        public String description() {
          return "Probing for regionMbeans";
        }
      };
      waitForCriterion(waitForRegionMBeans, 30000, 2000, true);
      String[] regions = { DATA_REGION_NAME_PATH, DATA_REGION_NAME_VM1_PATH, DATA_REGION_NAME_VM2_PATH,
          DATA_PAR_REGION_NAME_PATH, DATA_PAR_REGION_NAME_VM1_PATH, DATA_PAR_REGION_NAME_VM2_PATH };
      for (String region : regions) {
        bean = service.getDistributedRegionMXBean(region);
        assertNotNull(bean);
        String[] membersName = bean.getMembers();
        getLogWriter().info("Members Array for region " + region + " : " + StringUtils.objectToString(membersName, true, 10));
        if (bean.getMemberCount() < 1) {
          fail("Even after waiting, the MBean reports that fewer than one member hosts region " + region);
        }
      }
    }
  };
  manager.invoke(checkRegionMBeans);
}
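On the manager VM the test polls until the federated MBean for every region appears. Stripped of the DUnit wait loop, the core lookup is just ManagementService plus DistributedRegionMXBean. The sketch below shows how a caller could check how many members host a given region path; the class, method name, and null handling are illustrative, not taken from the test.

import org.apache.geode.cache.Cache;
import org.apache.geode.management.DistributedRegionMXBean;
import org.apache.geode.management.ManagementService;

public class RegionMBeanSketch {
  // Returns the number of members hosting the region, or 0 if the federated
  // MBean has not been created yet (callers typically poll until it appears).
  public static int membersHostingRegion(Cache cache, String regionPath) {
    ManagementService service = ManagementService.getManagementService(cache);
    DistributedRegionMXBean bean = service.getDistributedRegionMXBean(regionPath);
    return bean == null ? 0 : bean.getMemberCount();
  }
}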
Use of org.apache.geode.cache.PartitionAttributes in project geode by apache.
Class PartitionedRegionEvictionDUnitTest, method testEvictionValidationForLRUEntry_AccessorFirst:
@Test
public void testEvictionValidationForLRUEntry_AccessorFirst() {
  final Host host = Host.getHost(0);
  final VM firstAccessor = host.getVM(0);
  final VM testAccessor = host.getVM(1);
  final VM testDatastore = host.getVM(2);
  final VM firstDatastore = host.getVM(3);
  final String uniqName = getUniqueName();
  final int redundantCopies = 1;
  final int maxEntries = 226;
  final String name = uniqName + "-PR";
  final EvictionAttributes firstEvictionAttrs =
      EvictionAttributes.createLRUEntryAttributes(maxEntries, EvictionAction.LOCAL_DESTROY);
  final EvictionAttributes secondEvictionAttrs =
      EvictionAttributes.createLRUEntryAttributes(maxEntries, EvictionAction.OVERFLOW_TO_DISK);
  final SerializableRunnable createFirstAccessor =
      new CacheSerializableRunnable("Create an accessor without eviction attributes") {
        public void run2() {
          final PartitionAttributes pra = new PartitionAttributesFactory()
              .setRedundantCopies(redundantCopies).setLocalMaxMemory(0).create();
          AttributesFactory factory = new AttributesFactory();
          factory.setOffHeap(isOffHeap());
          factory.setPartitionAttributes(pra);
          final Region pr = createRootRegion(name, factory.create());
          assertNotNull(pr);
        }
      };
  final SerializableRunnable createFirstDataStore =
      new CacheSerializableRunnable("Create a data store with eviction attributes") {
        public void run2() {
          final PartitionAttributes pra = new PartitionAttributesFactory()
              .setRedundantCopies(redundantCopies).setLocalMaxMemory(0).create();
          AttributesFactory factory = new AttributesFactory();
          factory.setOffHeap(isOffHeap());
          factory.setPartitionAttributes(pra);
          factory.setEvictionAttributes(firstEvictionAttrs);
          final Region pr = createRootRegion(name, factory.create());
          assertNotNull(pr);
        }
      };
  final SerializableRunnable createSecondAccessor =
      new CacheSerializableRunnable("Create an accessor with incorrect eviction attributes") {
        public void run2() {
          final PartitionAttributes pra = new PartitionAttributesFactory()
              .setRedundantCopies(redundantCopies).setLocalMaxMemory(0).create();
          AttributesFactory factory = new AttributesFactory();
          factory.setOffHeap(isOffHeap());
          factory.setPartitionAttributes(pra);
          factory.setEvictionAttributes(secondEvictionAttrs);
          final Region pr = createRootRegion(name, factory.create());
          assertNotNull(pr);
        }
      };
  final SerializableRunnable createSecondDataStore =
      new CacheSerializableRunnable("Create a data store with eviction attributes") {
        public void run2() {
          final PartitionAttributes pra = new PartitionAttributesFactory()
              .setRedundantCopies(redundantCopies).setLocalMaxMemory(0).create();
          AttributesFactory factory = new AttributesFactory();
          factory.setOffHeap(isOffHeap());
          factory.setPartitionAttributes(pra);
          factory.setEvictionAttributes(firstEvictionAttrs);
          final Region pr = createRootRegion(name, factory.create());
          assertNotNull(pr);
        }
      };
  firstAccessor.invoke(createFirstAccessor);
  firstDatastore.invoke(createFirstDataStore);
  testAccessor.invoke(createSecondAccessor);
  testDatastore.invoke(createSecondDataStore);
}
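The test above (and its datastore-first variant below) pairs PartitionAttributes with EvictionAttributes on the members of one partitioned region. As an isolated sketch, assuming a plain single-member setup rather than the DUnit harness, with an illustrative class and region name, a member hosting an entry-LRU partitioned region could be created like this:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.PartitionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class EvictingPartitionedRegionSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    PartitionAttributes<Object, Object> pra = new PartitionAttributesFactory<Object, Object>()
        .setRedundantCopies(1)
        .create();
    // Evict the least recently used entries once 226 live entries are reached,
    // destroying them on this member only.
    Region<Object, Object> pr = cache.<Object, Object>createRegionFactory(RegionShortcut.PARTITION)
        .setPartitionAttributes(pra)
        .setEvictionAttributes(
            EvictionAttributes.createLRUEntryAttributes(226, EvictionAction.LOCAL_DESTROY))
        .create("example-PR");
    System.out.println("Created " + pr.getFullPath());
    cache.close();
  }
}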
Use of org.apache.geode.cache.PartitionAttributes in project geode by apache.
Class PartitionedRegionEvictionDUnitTest, method testEvictionValidationForLRUEntry_DatastoreFirst:
@Test
public void testEvictionValidationForLRUEntry_DatastoreFirst() {
  final Host host = Host.getHost(0);
  final VM firstAccessor = host.getVM(0);
  final VM testAccessor = host.getVM(1);
  final VM testDatastore = host.getVM(2);
  final VM firstDatastore = host.getVM(3);
  final String uniqName = getUniqueName();
  final int redundantCopies = 1;
  final int maxEntries = 226;
  final String name = uniqName + "-PR";
  final EvictionAttributes firstEvictionAttrs =
      EvictionAttributes.createLRUEntryAttributes(maxEntries, EvictionAction.LOCAL_DESTROY);
  final EvictionAttributes secondEvictionAttrs =
      EvictionAttributes.createLRUEntryAttributes(maxEntries, EvictionAction.OVERFLOW_TO_DISK);
  final SerializableRunnable createFirstAccessor =
      new CacheSerializableRunnable("Create an accessor without eviction attributes") {
        public void run2() {
          final PartitionAttributes pra = new PartitionAttributesFactory()
              .setRedundantCopies(redundantCopies).setLocalMaxMemory(0).create();
          AttributesFactory factory = new AttributesFactory();
          factory.setOffHeap(isOffHeap());
          factory.setPartitionAttributes(pra);
          final Region pr = createRootRegion(name, factory.create());
          assertNotNull(pr);
        }
      };
  final SerializableRunnable createFirstDataStore =
      new CacheSerializableRunnable("Create a data store with eviction attributes") {
        public void run2() {
          final PartitionAttributes pra = new PartitionAttributesFactory()
              .setRedundantCopies(redundantCopies).setLocalMaxMemory(0).create();
          AttributesFactory factory = new AttributesFactory();
          factory.setOffHeap(isOffHeap());
          factory.setPartitionAttributes(pra);
          factory.setEvictionAttributes(firstEvictionAttrs);
          final Region pr = createRootRegion(name, factory.create());
          assertNotNull(pr);
        }
      };
  final SerializableRunnable createSecondAccessor =
      new CacheSerializableRunnable("Create an accessor with incorrect eviction attributes") {
        public void run2() {
          final PartitionAttributes pra = new PartitionAttributesFactory()
              .setRedundantCopies(redundantCopies).setLocalMaxMemory(0).create();
          AttributesFactory factory = new AttributesFactory();
          factory.setOffHeap(isOffHeap());
          factory.setPartitionAttributes(pra);
          factory.setEvictionAttributes(secondEvictionAttrs);
          final Region pr = createRootRegion(name, factory.create());
          assertNotNull(pr);
        }
      };
  final SerializableRunnable createSecondDataStore =
      new CacheSerializableRunnable("Create a data store with eviction attributes") {
        public void run2() {
          final PartitionAttributes pra = new PartitionAttributesFactory()
              .setRedundantCopies(redundantCopies).setLocalMaxMemory(0).create();
          AttributesFactory factory = new AttributesFactory();
          factory.setOffHeap(isOffHeap());
          factory.setPartitionAttributes(pra);
          factory.setEvictionAttributes(firstEvictionAttrs);
          final Region pr = createRootRegion(name, factory.create());
          assertNotNull(pr);
        }
      };
  firstDatastore.invoke(createFirstDataStore);
  firstAccessor.invoke(createFirstAccessor);
  testDatastore.invoke(createSecondDataStore);
  testAccessor.invoke(createSecondAccessor);
}
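This variant is identical to the accessor-first test above except for the invocation order at the end: the data store with eviction attributes creates the region before the accessor without them, so attribute validation is exercised with the members joining in the opposite order.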