Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
In class Bug43684DUnitTest, method createServerCache:
@SuppressWarnings("rawtypes")
public static Integer createServerCache(Boolean isReplicated, Boolean isPrimaryEmpty) throws Exception {
  disconnectFromDS();
  Properties props = new Properties();
  props.setProperty(LOCATORS, "localhost[" + DistributedTestUtils.getDUnitLocatorPort() + "]");
  props.setProperty(STATISTIC_ARCHIVE_FILE, "server_" + OSProcess.getId() + ".gfs");
  props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
  CacheFactory cf = new CacheFactory(props);
  cache = (GemFireCacheImpl) cf.create();
  RegionFactory rf;
  if (isReplicated) {
    RegionShortcut rs = isPrimaryEmpty ? RegionShortcut.REPLICATE_PROXY : RegionShortcut.REPLICATE;
    rf = cache.createRegionFactory(rs);
  } else {
    RegionShortcut rs = isPrimaryEmpty ? RegionShortcut.PARTITION_PROXY : RegionShortcut.PARTITION;
    rf = cache.createRegionFactory(rs);
    rf.setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(numBuckets).create());
  }
  rf.create(REGION_NAME);
  CacheServerImpl server = (CacheServerImpl) cache.addCacheServer();
  server.setPort(AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET));
  server.start();
  return server.getPort();
}
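For reference, the partitioned branch above reduces to the following minimal sketch (a standalone illustration, not part of the test; the bucket count and region name are placeholders, and the usual org.apache.geode.cache imports are assumed):

Cache cache = new CacheFactory(new Properties()).create();
// Build PartitionAttributes fluently, then hand them to the RegionFactory.
PartitionAttributes pa = new PartitionAttributesFactory().setTotalNumBuckets(11).create();
RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION);
rf.setPartitionAttributes(pa);
Region region = rf.create("exampleRegion");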
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
In class ShutdownAllDUnitTest, method getCreatePRRunnable:
private SerializableRunnable getCreatePRRunnable(final String regionName, final String diskStoreName, final int redundancy) {
  return new SerializableRunnable("create pr") {
    @Override
    public void run() {
      final CountDownLatch recoveryDone;
      if (redundancy > 0) {
        recoveryDone = new CountDownLatch(1);
        ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
          @Override
          public void recoveryFinished(Region region) {
            recoveryDone.countDown();
          }
        };
        InternalResourceManager.setResourceObserver(observer);
      } else {
        recoveryDone = null;
      }
      Cache cache = ShutdownAllDUnitTest.this.getCache();
      if (diskStoreName != null) {
        DiskStore ds = cache.findDiskStore(diskStoreName);
        if (ds == null) {
          ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create(diskStoreName);
        }
      }
      AttributesFactory af = new AttributesFactory();
      // use async to trigger flush
      af.setDiskSynchronous(false);
      af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy);
      af.setPartitionAttributes(paf.create());
      if (diskStoreName != null) {
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName(diskStoreName);
      } else {
        af.setDataPolicy(DataPolicy.PARTITION);
      }
      cache.createRegion(regionName, af.create());
      if (recoveryDone != null) {
        try {
          recoveryDone.await();
        } catch (InterruptedException e) {
          Assert.fail("Interrupted", e);
        }
      }
    }
  };
}
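When redundancy > 0, run() installs a ResourceObserver and blocks on the recoveryDone latch until redundancy recovery finishes; with redundancy 0 no latch is needed. A typical call site (hypothetical, not shown in this excerpt) would execute the runnable in a DUnit VM:

VM vm0 = Host.getHost(0).getVM(0);
// redundancy 1: run() waits for redundancy recovery before returning
vm0.invoke(getCreatePRRunnable("region", "disk", 1));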
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
In class PersistentPartitionedRegionOldConfigDUnitTest, method getPersistentPRAttributes:
@Override
protected RegionAttributes getPersistentPRAttributes(final int redundancy, final int recoveryDelay, Cache cache, int numBuckets, boolean synchronous) {
  AttributesFactory af = new AttributesFactory();
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(redundancy);
  paf.setRecoveryDelay(recoveryDelay);
  paf.setTotalNumBuckets(numBuckets);
  af.setPartitionAttributes(paf.create());
  af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
  af.setDiskDirs(getDiskDirs());
  RegionAttributes attr = af.create();
  return attr;
}
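A caller in the test framework would use the returned attributes to create the region, along these lines (a hypothetical call, not shown in this excerpt; the argument values and region name are placeholders):

RegionAttributes attr = getPersistentPRAttributes(1, -1, getCache(), 113, true);
getCache().createRegion("persistentPR", attr);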
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
In class PersistentPartitionedRegionTestBase, method getPersistentPRAttributes:
protected RegionAttributes getPersistentPRAttributes(final int redundancy, final int recoveryDelay, Cache cache, int numBuckets, boolean synchronous) {
  DiskStore ds = cache.findDiskStore("disk");
  if (ds == null) {
    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
  }
  AttributesFactory af = new AttributesFactory();
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(redundancy);
  paf.setRecoveryDelay(recoveryDelay);
  paf.setTotalNumBuckets(numBuckets);
  // Make sure all vms end up with the same local max memory
  paf.setLocalMaxMemory(500);
  af.setPartitionAttributes(paf.create());
  af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
  af.setDiskStoreName("disk");
  af.setDiskSynchronous(synchronous);
  RegionAttributes attr = af.create();
  return attr;
}
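AttributesFactory is the older configuration API; the same setup expressed through the RegionShortcut-based RegionFactory API would look roughly like the sketch below (an equivalent rewrite under that assumption, not code from the test base):

PartitionAttributes pa = new PartitionAttributesFactory()
    .setRedundantCopies(redundancy)
    .setRecoveryDelay(recoveryDelay)
    .setTotalNumBuckets(numBuckets)
    .setLocalMaxMemory(500) // same local max memory on every VM, as above
    .create();
RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT);
rf.setPartitionAttributes(pa);
rf.setDiskStoreName("disk");
rf.setDiskSynchronous(synchronous);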
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
In class ClientServerForceInvalidateDUnitTest, method createServerCache:
private static Integer createServerCache(Boolean concurrencyChecksEnabled, Boolean partitioned, Integer maxThreads) throws Exception {
  AbstractRegionMap.FORCE_INVALIDATE_EVENT = true;
  Properties props = new Properties();
  Cache cache = new ClientServerForceInvalidateDUnitTest().createCacheV(props);
  RegionFactory<String, String> factory = cache.createRegionFactory();
  if (partitioned) {
    factory.setDataPolicy(DataPolicy.PARTITION);
    factory.setPartitionAttributes(new PartitionAttributesFactory<String, String>().setRedundantCopies(0).setTotalNumBuckets(251).create());
  } else {
    factory.setDataPolicy(DataPolicy.REPLICATE);
  }
  factory.setConcurrencyChecksEnabled(concurrencyChecksEnabled);
  factory.addCacheListener(new ServerListener());
  Region<String, String> r1 = factory.create(REGION_NAME1);
  assertNotNull(r1);
  CacheServer server = cache.addCacheServer();
  int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
  logger.info("Starting server on port " + port);
  server.setPort(port);
  server.setMaxThreads(maxThreads.intValue());
  server.start();
  logger.info("Started server on port " + server.getPort());
  return new Integer(server.getPort());
}
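On the client side, a test would typically connect to the returned port with a ClientCacheFactory (a hedged sketch; the host, region shortcut, and variable names are assumptions, not taken from this test):

ClientCache clientCache = new ClientCacheFactory().addPoolServer("localhost", port).create();
Region<String, String> region =
    clientCache.<String, String>createClientRegionFactory(ClientRegionShortcut.PROXY).create(REGION_NAME1);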