Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
From the class IndexCreationDeadLockJUnitTest, method testIndexCreationDeadLockForOverflowToDiskRegion:
/**
 * Tests the inability to create an index on a region that overflows to disk.
 */
@Test
public void testIndexCreationDeadLockForOverflowToDiskRegion() {
  this.region.destroyRegion();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.DISTRIBUTED_ACK);
  factory.setValueConstraint(Portfolio.class);
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK));
  factory.setIndexMaintenanceSynchronous(true);
  // Back the overflow data with a disk store in a local "test" directory.
  File dir = new File("test");
  dir.mkdir();
  DiskStoreFactory dsf = region.getCache().createDiskStoreFactory();
  DiskStore ds1 = dsf.setDiskDirs(new File[] {dir}).create("ds1");
  factory.setDiskStoreName("ds1");
  dir.deleteOnExit();
  region = CacheUtils.createRegion("portfolios", factory.create(), true);
  simulateDeadlockScenario();
  assertFalse(this.cause, this.testFailed);
  assertTrue("Index creation succeeded. For a disk region this should not have happened",
      this.exceptionInCreatingIndex);
}
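For comparison, here is a minimal sketch of the same overflow-to-disk wiring using the RegionFactory API rather than the older AttributesFactory used by the test. It assumes an already-created Cache named cache; the store and region names ("overflowStore", "portfolios") and the Portfolio value type are illustrative, not taken from the excerpt above.

// Sketch only: create a disk store, then attach an overflow-to-disk region to it.
File overflowDir = new File("overflow");
overflowDir.mkdirs();
DiskStoreFactory dsf = cache.createDiskStoreFactory();
DiskStore store = dsf.setDiskDirs(new File[] {overflowDir}).create("overflowStore");
RegionFactory<String, Portfolio> rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
rf.setDiskStoreName(store.getName());
rf.setEvictionAttributes(
    EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK));
Region<String, Portfolio> portfolios = rf.create("portfolios");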
Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
From the class CacheXml66DUnitTest, method testDiskStore:
@Test
public void testDiskStore() throws Exception {
  CacheCreation cache = new CacheCreation();
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  File[] dirs1 = new File[] {new File("").getAbsoluteFile()};
  DiskStore ds1 = dsf.setAllowForceCompaction(true)
      .setAutoCompact(true)
      .setCompactionThreshold(100)
      .setMaxOplogSize(2)
      .setTimeInterval(10)
      .setWriteBufferSize(15)
      .setQueueSize(12)
      .setDiskDirsAndSizes(dirs1, new int[] {1024 * 20})
      .create(getUniqueName() + 1);
  File[] dirs2 = new File[] {new File("").getAbsoluteFile()};
  DiskStore ds2 = dsf.setAllowForceCompaction(false)
      .setAutoCompact(false)
      .setCompactionThreshold(99)
      .setMaxOplogSize(1)
      .setTimeInterval(9)
      .setWriteBufferSize(14)
      .setQueueSize(11)
      .setDiskDirsAndSizes(dirs2, new int[] {1024 * 40})
      .create(getUniqueName() + 2);
  RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
  attrs.setScope(Scope.DISTRIBUTED_ACK);
  attrs.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  attrs.setDiskStoreName(getUniqueName() + 1);
  attrs.setDiskSynchronous(true);
  RegionCreation root = (RegionCreation) cache.createRegion("root", attrs);
  {
    attrs = new RegionAttributesCreation(cache);
    attrs.setScope(Scope.DISTRIBUTED_ACK);
    attrs.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    attrs.setDiskStoreName(getUniqueName() + 2);
    Region subwithdiskstore = root.createSubregion("subwithdiskstore", attrs);
  }
  {
    attrs = new RegionAttributesCreation(cache);
    attrs.setScope(Scope.DISTRIBUTED_ACK);
    attrs.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region subwithdefaultdiskstore = root.createSubregion("subwithdefaultdiskstore", attrs);
  }
  testXml(cache);
}
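Note that both disk stores above are created from the same DiskStoreFactory instance. The factory retains whatever attributes were last set, which is presumably why every attribute is explicitly overridden before the second create() call, so that ds2 does not inherit the values configured for ds1.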
Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
From the class BackupDUnitTest, method createOverflowRegion:
protected void createOverflowRegion(final VM vm) {
  SerializableRunnable createRegion = new SerializableRunnable("Create overflow region") {
    public void run() {
      Cache cache = getCache();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(getDiskDirs(getUniqueName()));
      dsf.setMaxOplogSize(1);
      DiskStore ds = dsf.create(getUniqueName());
      RegionFactory rf = new RegionFactory();
      rf.setDiskStoreName(ds.getName());
      rf.setDiskSynchronous(true);
      rf.setDataPolicy(DataPolicy.REPLICATE);
      rf.setEvictionAttributes(
          EvictionAttributes.createLIFOEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK));
      rf.create("region3");
    }
  };
  vm.invoke(createRegion);
}
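The getDiskDirs(String) helper called above is not part of this excerpt. A plausible minimal shape, assuming it only needs to supply one scratch directory per disk store name, might be:

// Hypothetical helper; the real BackupDUnitTest implementation is not shown here.
// Returns a single freshly created directory named after the disk store.
protected File[] getDiskDirs(String name) {
  File dir = new File("diskDir-" + name);
  dir.mkdirs();
  return new File[] {dir};
}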
Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
From the class PdxDeleteFieldJUnitTest, method testPdxDeleteField:
@Test
public void testPdxDeleteField() throws Exception {
  String DS_NAME = "PdxDeleteFieldJUnitTestDiskStore";
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "");
  File f = new File(DS_NAME);
  f.mkdir();
  try {
    Cache cache = (new CacheFactory(props)).setPdxPersistent(true).setPdxDiskStore(DS_NAME).create();
    try {
      {
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        dsf.setDiskDirs(new File[] {f});
        dsf.create(DS_NAME);
      }
      RegionFactory<String, PdxValue> rf1 = cache.createRegionFactory(RegionShortcut.LOCAL_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME);
      Region<String, PdxValue> region1 = rf1.create("region1");
      PdxValue pdxValue = new PdxValue(1, 2L);
      region1.put("key1", pdxValue);
      byte[] pdxValueBytes = BlobHelper.serializeToBlob(pdxValue);
      {
        // Sanity check: both fields are present before the offline delete.
        PdxValue deserializedPdxValue = (PdxValue) BlobHelper.deserializeBlob(pdxValueBytes);
        assertEquals(1, deserializedPdxValue.value);
        assertEquals(2L, deserializedPdxValue.fieldToDelete);
      }
      cache.close();
      // Remove the field from the persisted PDX type while the cache is offline.
      Collection<PdxType> types = DiskStoreImpl.pdxDeleteField(DS_NAME, new File[] {f},
          PdxValue.class.getName(), "fieldToDelete");
      assertEquals(1, types.size());
      PdxType pt = types.iterator().next();
      assertEquals(PdxValue.class.getName(), pt.getClassName());
      assertEquals(null, pt.getPdxField("fieldToDelete"));
      types = DiskStoreImpl.getPdxTypes(DS_NAME, new File[] {f});
      assertEquals(1, types.size());
      pt = types.iterator().next();
      assertEquals(PdxValue.class.getName(), pt.getClassName());
      assertEquals(true, pt.getHasDeletedField());
      assertEquals(null, pt.getPdxField("fieldToDelete"));
      cache = (new CacheFactory(props)).setPdxPersistent(true).setPdxDiskStore(DS_NAME).create();
      {
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        dsf.setDiskDirs(new File[] {f});
        dsf.create(DS_NAME);
        // After the delete, the old blob deserializes with the removed field at its default value.
        PdxValue deserializedPdxValue = (PdxValue) BlobHelper.deserializeBlob(pdxValueBytes);
        assertEquals(1, deserializedPdxValue.value);
        assertEquals(0L, deserializedPdxValue.fieldToDelete);
      }
    } finally {
      if (!cache.isClosed()) {
        cache.close();
      }
    }
  } finally {
    FileUtils.deleteDirectory(f);
  }
}
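The PdxValue class used above is an inner class of the test and is not included in this excerpt. A minimal sketch consistent with the assertions (an int value field plus a long fieldToDelete field, serialized via PdxSerializable) might look like the following; field names and types are inferred from the test, not copied from the Geode source.

// Hypothetical reconstruction of the test's PdxValue inner class.
public static class PdxValue implements PdxSerializable {
  public int value;
  public long fieldToDelete;

  public PdxValue() {
    // no-arg constructor required for PDX deserialization
  }

  public PdxValue(int value, long fieldToDelete) {
    this.value = value;
    this.fieldToDelete = fieldToDelete;
  }

  @Override
  public void toData(PdxWriter writer) {
    writer.writeInt("value", value);
    writer.writeLong("fieldToDelete", fieldToDelete);
  }

  @Override
  public void fromData(PdxReader reader) {
    value = reader.readInt("value");
    fieldToDelete = reader.readLong("fieldToDelete");
  }
}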
Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
From the class PdxRenameDUnitTest, method testPdxRenameVersioning:
@Test
public void testPdxRenameVersioning() throws Exception {
  final String DS_NAME = "PdxRenameDUnitTestDiskStore";
  final String DS_NAME2 = "PdxRenameDUnitTestDiskStore2";
  final int[] locatorPorts = AvailablePortHelper.getRandomAvailableTCPPorts(2);
  final File f = new File(DS_NAME);
  f.mkdir();
  final File f2 = new File(DS_NAME2);
  f2.mkdir();
  this.filesToBeDeleted.add(DS_NAME);
  this.filesToBeDeleted.add(DS_NAME2);
  final Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "localhost[" + locatorPorts[0] + "],localhost[" + locatorPorts[1] + "]");
  props.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
  Host host = Host.getHost(0);
  VM vm1 = host.getVM(0);
  VM vm2 = host.getVM(1);
  // vm1: create a persistent PDX disk store and write one PdxValue.
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      disconnectFromDS();
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[0] + "]");
      final Cache cache = (new CacheFactory(props)).setPdxPersistent(true).setPdxDiskStore(DS_NAME).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] {f});
      dsf.create(DS_NAME);
      RegionFactory<String, PdxValue> rf1 = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME);
      Region<String, PdxValue> region1 = rf1.create("region1");
      region1.put("key1", new PdxValue(1));
      return null;
    }
  });
  // vm2: read the value through its own persistent PDX disk store, then close.
  vm2.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      disconnectFromDS();
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[1] + "]");
      final Cache cache = (new CacheFactory(props)).setPdxReadSerialized(true).setPdxPersistent(true).setPdxDiskStore(DS_NAME2).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] {f2});
      dsf.create(DS_NAME2);
      RegionFactory rf1 = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME2);
      Region region1 = rf1.create("region1");
      Object v = region1.get("key1");
      assertNotNull(v);
      cache.close();
      return null;
    }
  });
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Cache cache = CacheFactory.getAnyInstance();
      if (cache != null && !cache.isClosed()) {
        cache.close();
      }
      return null;
    }
  });
  // vm1: rename the "apache" package token to "pivotal" in the offline disk store.
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Collection<Object> renameResults = DiskStoreImpl.pdxRename(DS_NAME, new File[] {f}, "apache", "pivotal");
      assertEquals(2, renameResults.size());
      for (Object o : renameResults) {
        if (o instanceof PdxType) {
          PdxType t = (PdxType) o;
          assertEquals("org.pivotal.geode.internal.PdxRenameDUnitTest$PdxValue", t.getClassName());
        } else {
          EnumInfo ei = (EnumInfo) o;
          assertEquals("org.pivotal.geode.internal.PdxRenameDUnitTest$Day", ei.getClassName());
        }
      }
      return null;
    }
  });
  // vm1: restart with the renamed PDX registry.
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[0] + "]");
      final Cache cache = (new CacheFactory(props)).setPdxPersistent(true).setPdxDiskStore(DS_NAME).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] {f});
      dsf.create(DS_NAME);
      RegionFactory<String, PdxValue> rf1 = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME);
      Region<String, PdxValue> region1 = rf1.create("region1");
      return null;
    }
  });
  // vm2: verify the recovered PdxInstance now reports the renamed class name.
  vm2.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      disconnectFromDS();
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[1] + "]");
      final Cache cache = (new CacheFactory(props)).setPdxReadSerialized(true).setPdxPersistent(true).setPdxDiskStore(DS_NAME2).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] {f2});
      dsf.create(DS_NAME2);
      RegionFactory rf1 = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME2);
      Region region1 = rf1.create("region1");
      PdxInstance v = (PdxInstance) region1.get("key1");
      assertNotNull(v);
      assertEquals("org.pivotal.geode.internal.PdxRenameDUnitTest$PdxValue", ((PdxInstanceImpl) v).getClassName());
      cache.close();
      return null;
    }
  });
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Cache cache = CacheFactory.getAnyInstance();
      if (cache != null && !cache.isClosed()) {
        cache.close();
      }
      return null;
    }
  });
}
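The offline rename returns two entries because the persisted PDX registry contains both the PdxValue PDX type and a Day enum whose class names include the replaced "apache" token; both PdxValue and Day appear to be inner classes of the test that are not shown in this excerpt.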