use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class PdxDeleteFieldDUnitTest method testPdxDeleteFieldVersioning.
@Test
public void testPdxDeleteFieldVersioning() throws Exception {
  final String DS_NAME = "PdxDeleteFieldDUnitTestDiskStore";
  final String DS_NAME2 = "PdxDeleteFieldDUnitTestDiskStore2";
  final Properties props = new Properties();
  final int[] locatorPorts = AvailablePortHelper.getRandomAvailableTCPPorts(2);
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS,
      "localhost[" + locatorPorts[0] + "],localhost[" + locatorPorts[1] + "]");
  props.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
  final File f = new File(DS_NAME);
  f.mkdir();
  final File f2 = new File(DS_NAME2);
  f2.mkdir();
  this.filesToBeDeleted.add(DS_NAME);
  this.filesToBeDeleted.add(DS_NAME2);

  Host host = Host.getHost(0);
  VM vm1 = host.getVM(0);
  VM vm2 = host.getVM(1);

  // vm1: create a persistent PDX disk store and put a PdxValue entry.
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      disconnectFromDS();
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[0] + "]");
      final Cache cache =
          (new CacheFactory(props)).setPdxPersistent(true).setPdxDiskStore(DS_NAME).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] {f});
      dsf.create(DS_NAME);
      RegionFactory<String, PdxValue> rf1 =
          cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME);
      Region<String, PdxValue> region1 = rf1.create("region1");
      region1.put("key1", new PdxValue(1, 2L));
      return null;
    }
  });

  // vm2: read the replicated entry as serialized PDX through its own persistent disk store.
  vm2.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      disconnectFromDS();
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[1] + "]");
      final Cache cache = (new CacheFactory(props)).setPdxReadSerialized(true)
          .setPdxPersistent(true).setPdxDiskStore(DS_NAME2).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] {f2});
      dsf.create(DS_NAME2);
      RegionFactory rf1 = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME2);
      Region region1 = rf1.create("region1");
      Object v = region1.get("key1");
      assertNotNull(v);
      cache.close();
      return null;
    }
  });

  // vm1: close the cache so its disk store can be modified offline.
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Cache cache = CacheFactory.getAnyInstance();
      if (cache != null && !cache.isClosed()) {
        cache.close();
      }
      return null;
    }
  });

  // vm1: delete "fieldToDelete" from the offline disk store's PDX type registry.
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Collection<PdxType> types = DiskStoreImpl.pdxDeleteField(DS_NAME, new File[] {f},
          PdxValue.class.getName(), "fieldToDelete");
      assertEquals(1, types.size());
      PdxType pt = types.iterator().next();
      assertEquals(PdxValue.class.getName(), pt.getClassName());
      assertEquals(null, pt.getPdxField("fieldToDelete"));
      return null;
    }
  });

  // vm1: restart with the modified disk store.
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[0] + "]");
      final Cache cache =
          (new CacheFactory(props)).setPdxPersistent(true).setPdxDiskStore(DS_NAME).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] {f});
      dsf.create(DS_NAME);
      RegionFactory<String, PdxValue> rf1 =
          cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME);
      Region<String, PdxValue> region1 = rf1.create("region1");
      return null;
    }
  });

  // vm2: verify the deleted field is no longer visible on the PdxInstance.
  vm2.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[1] + "]");
      final Cache cache = (new CacheFactory(props)).setPdxReadSerialized(true)
          .setPdxPersistent(true).setPdxDiskStore(DS_NAME2).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] {f2});
      dsf.create(DS_NAME2);
      RegionFactory rf1 = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME2);
      Region region1 = rf1.create("region1");
      PdxInstance v = (PdxInstance) region1.get("key1");
      assertNotNull(v);
      assertEquals(1, v.getField("value"));
      assertEquals(null, v.getField("fieldToDelete"));
      cache.close();
      return null;
    }
  });

  // vm1: final cleanup.
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Cache cache = CacheFactory.getAnyInstance();
      if (cache != null && !cache.isClosed()) {
        cache.close();
      }
      return null;
    }
  });
}
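The test serializes a PdxValue object and later asserts on its "value" and "fieldToDelete" fields. The real test defines its own PdxValue class; the following is only a minimal sketch, inferred from those assertions, of what such a PdxSerializable class could look like:

// Hypothetical sketch of the PdxValue class the test serializes; field names
// and types are taken from the assertions above, not from the original source.
public static class PdxValue implements PdxSerializable {
  public int value;
  public long fieldToDelete;

  public PdxValue() {
    // no-arg constructor required for PDX deserialization
  }

  public PdxValue(int value, long fieldToDelete) {
    this.value = value;
    this.fieldToDelete = fieldToDelete;
  }

  @Override
  public void toData(PdxWriter writer) {
    writer.writeInt("value", this.value);
    writer.writeLong("fieldToDelete", this.fieldToDelete);
  }

  @Override
  public void fromData(PdxReader reader) {
    this.value = reader.readInt("value");
    this.fieldToDelete = reader.readLong("fieldToDelete");
  }
}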
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class BackupDUnitTest method createPersistentRegionAsync.
protected AsyncInvocation createPersistentRegionAsync(final VM vm) {
  SerializableRunnable createRegion = new SerializableRunnable("Create persistent region") {
    public void run() {
      Cache cache = getCache();

      // First disk store and the partitioned region that uses it.
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(getDiskDirs(getUniqueName()));
      dsf.setMaxOplogSize(1);
      DiskStore ds = dsf.create(getUniqueName());

      RegionFactory rf = new RegionFactory();
      rf.setDiskStoreName(ds.getName());
      rf.setDiskSynchronous(true);
      rf.setDataPolicy(getDataPolicy());
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(0);
      rf.setPartitionAttributes(paf.create());
      rf.create("region1");

      // Second disk store backing a second region.
      dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(getDiskDirs(getUniqueName() + 2));
      dsf.setMaxOplogSize(1);
      ds = dsf.create(getUniqueName() + 2);
      rf.setDiskStoreName(getUniqueName() + 2);
      rf.create("region2");
    }
  };
  return vm.invokeAsync(createRegion);
}
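A hypothetical caller (not shown in the snippet) would typically start the region creation on several VMs in parallel and then block on each AsyncInvocation; assuming the dunit framework's getResult() waits for completion and rethrows remote failures, that could look like:

// Hypothetical usage: create the persistent regions on two VMs concurrently.
AsyncInvocation create0 = createPersistentRegionAsync(vm0);
AsyncInvocation create1 = createPersistentRegionAsync(vm1);
// Wait for both invocations and propagate any remote failure.
create0.getResult();
create1.getResult();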
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class BackupJUnitTest method createDiskStore.
private DiskStoreImpl createDiskStore() {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setDiskDirs(diskDirs);
  dsf.setMaxOplogSize(1);
  String name = "diskStore";
  return (DiskStoreImpl) dsf.create(name);
}
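The returned DiskStoreImpl is typically wired to a persistent region before a backup is taken. A minimal sketch of such usage (the region name, shortcut, and sample data are illustrative, not from the test):

// Hypothetical sketch: attach the disk store to a local persistent region.
DiskStoreImpl diskStore = createDiskStore();
RegionFactory<Object, Object> rf = cache.createRegionFactory(RegionShortcut.LOCAL_PERSISTENT);
rf.setDiskStoreName(diskStore.getName());
Region<Object, Object> region = rf.create("region");
region.put("key", "value"); // data that a subsequent backup would capture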
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class DiskStoreFactoryJUnitTest method testMissingInitFile.
@Test
public void testMissingInitFile() {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  String name = "testMissingInitFile";
  DiskStore diskStore = dsf.create(name);
  File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + DiskInitFile.IF_FILE_EXT);
  assertTrue(ifFile.exists());
  AttributesFactory af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  cache.createRegion("r", af.create());
  cache.close();
  assertTrue(ifFile.exists());
  // Delete the init file and verify that recreating the disk store then fails.
  assertTrue(ifFile.delete());
  assertFalse(ifFile.exists());
  cache = createCache();
  dsf = cache.createDiskStoreFactory();
  assertEquals(null, ((GemFireCacheImpl) cache).findDiskStore(name));
  try {
    dsf.create(name);
    fail("expected IllegalStateException");
  } catch (IllegalStateException expected) {
  }
  // If the test passed, clean up the files.
  removeFiles(diskStore);
}
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class DiskStoreFactoryJUnitTest method testWriteBufferSize.
@Test
public void testWriteBufferSize() {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  String name = "testWriteBufferSize";
  DiskStore ds = dsf.setWriteBufferSize(0).create(name);
  assertEquals(0, ds.getWriteBufferSize());
  name = "testWriteBufferSize2";
  ds = dsf.setWriteBufferSize(Integer.MAX_VALUE).create(name);
  assertEquals(Integer.MAX_VALUE, ds.getWriteBufferSize());
  // A negative write buffer size must be rejected.
  try {
    dsf.setWriteBufferSize(-1);
    fail("expected IllegalArgumentException");
  } catch (IllegalArgumentException expected) {
  }
}
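Outside the tests, the same factory setters are commonly combined in one place; a minimal, hypothetical configuration (names and sizes are illustrative) might look like:

// Hypothetical stand-alone configuration combining the setters shown above.
DiskStoreFactory factory = cache.createDiskStoreFactory();
factory.setDiskDirs(new File[] {new File("myDiskDir")}); // directory must already exist
factory.setMaxOplogSize(10); // megabytes
factory.setWriteBufferSize(32768); // bytes; must be >= 0
DiskStore store = factory.create("myDiskStore");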