Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class OplogJUnitTest, method testPreblowErrorCondition.
/**
* Tests that when preblowing a file larger than the available disk space fails with an
* IOException, the system is able to recover without problems.
*
*/
// We now preallocate space for .if files as well as .crf and .drf files, so the test below is
// no longer valid. See revisions r42359 and r42320. Disabling this test.
@Ignore("TODO: test is disabled")
@Test
public void testPreblowErrorCondition() {
DiskStoreFactory dsf = cache.createDiskStoreFactory();
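// Deliberately request an absurdly large oplog so that preblowing it must fail.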
((DiskStoreFactoryImpl) dsf).setMaxOplogSizeInBytes(100000000L * 1024L * 1024L * 1024L);
dsf.setAutoCompact(false);
File dir = new File("testingDirectoryDefault");
dir.mkdir();
dir.deleteOnExit();
File[] dirs = { dir };
int[] size = new int[] { Integer.MAX_VALUE };
dsf.setDiskDirsAndSizes(dirs, size);
AttributesFactory factory = new AttributesFactory();
logWriter.info("<ExpectedException action=add>" + "Could not pregrow" + "</ExpectedException>");
try {
DiskStore ds = dsf.create("test");
factory.setDiskStoreName(ds.getName());
factory.setDiskSynchronous(true);
factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
factory.setScope(Scope.LOCAL);
try {
region = cache.createVMRegion("test", factory.createRegionAttributes());
} catch (Exception e1) {
logWriter.error("Test failed due to exception", e1);
fail("Test failed due to exception " + e1);
}
region.put("key1", new byte[900]);
byte[] val = null;
try {
val = (byte[]) ((LocalRegion) region).getValueOnDisk("key1");
} catch (Exception e) {
e.printStackTrace();
fail(e.toString());
}
assertEquals(900, val.length);
} finally {
logWriter.info("<ExpectedException action=remove>" + "Could not pregrow" + "</ExpectedException>");
}
closeDown();
}
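The notable detail above is the cast to the internal DiskStoreFactoryImpl: the public DiskStoreFactory.setMaxOplogSize takes megabytes, so the test reaches into the impl class to set a byte count directly. Below is a minimal sketch of the equivalent public-API setup, assuming a locally created cache (the class, directory, and store names are illustrative):

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.DiskStoreFactory;

public class DiskStoreSetupSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    dsf.setMaxOplogSize(512); // maximum oplog size, in megabytes
    File dir = new File("diskStoreDir");
    dir.mkdirs();
    // Limit this directory to 1024 MB of disk store data.
    dsf.setDiskDirsAndSizes(new File[] {dir}, new int[] {1024});
    DiskStore store = dsf.create("sketchStore");
    System.out.println("Created disk store " + store.getName());
    cache.close();
  }
}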
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class PersistentColocatedPartitionedRegionDUnitTest, method testColocatedPRAttributes.
@Test
public void testColocatedPRAttributes() {
Host host = Host.getHost(0);
VM vm0 = host.getVM(1);
vm0.invoke(new SerializableRunnable("create") {
public void run() {
Cache cache = getCache();
DiskStore ds = cache.findDiskStore("disk");
if (ds == null) {
ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
}
// Create Persistent region
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(0);
af.setPartitionAttributes(paf.create());
af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
af.setDiskStoreName("disk");
cache.createRegion("persistentLeader", af.create());
af.setDataPolicy(DataPolicy.PARTITION);
af.setDiskStoreName(null);
cache.createRegion("nonPersistentLeader", af.create());
// Create a non persistent PR
af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
af.setDiskStoreName("disk");
paf.setColocatedWith("nonPersistentLeader");
af.setPartitionAttributes(paf.create());
// Try to colocate a persistent PR with the non persistent PR. This should fail.
IgnoredException exp = IgnoredException.addIgnoredException("IllegalStateException");
try {
cache.createRegion("colocated", af.create());
fail("should not have been able to create a persistent region colocated with a non persistent region");
} catch (IllegalStateException expected) {
// do nothing
} finally {
exp.remove();
}
// Try to colocate a persistent PR with another persistent PR. This should work.
paf.setColocatedWith("persistentLeader");
af.setPartitionAttributes(paf.create());
cache.createRegion("colocated", af.create());
// We should also be able to colocate a non persistent region with a persistent region.
af.setDataPolicy(DataPolicy.PARTITION);
af.setDiskStoreName(null);
paf.setColocatedWith("persistentLeader");
af.setPartitionAttributes(paf.create());
cache.createRegion("colocated2", af.create());
}
});
}
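The rule this test pins down: a persistent PR can be colocated with another persistent PR, and a non-persistent PR can be colocated with a persistent one, but a persistent PR colocated with a non-persistent leader is rejected with IllegalStateException. A minimal sketch of the allowed combination using the public RegionFactory API (region and store names are illustrative):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionFactory;

public class ColocationSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    cache.createDiskStoreFactory().create("disk");

    // A persistent leader PR.
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(0);
    RegionFactory<Object, Object> leaderFactory = cache.createRegionFactory();
    leaderFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    leaderFactory.setDiskStoreName("disk");
    leaderFactory.setPartitionAttributes(paf.create());
    Region<Object, Object> leader = leaderFactory.create("persistentLeader");

    // A persistent child colocated with the persistent leader: allowed.
    // Pointing the same child at a non-persistent leader would throw
    // IllegalStateException, which is what the test asserts.
    PartitionAttributesFactory childPaf = new PartitionAttributesFactory();
    childPaf.setColocatedWith(leader.getFullPath());
    RegionFactory<Object, Object> childFactory = cache.createRegionFactory();
    childFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    childFactory.setDiskStoreName("disk");
    childFactory.setPartitionAttributes(childPaf.create());
    childFactory.create("colocatedChild");
    cache.close();
  }
}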
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class PersistentColocatedPartitionedRegionDUnitTest, method testRecoverySystemWithConcurrentPutter.
/**
* Tests what happens when we restart persistent members while an accessor is concurrently
* performing puts. This is for bug 43899.
*/
@Test
public void testRecoverySystemWithConcurrentPutter() throws Throwable {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
VM vm3 = host.getVM(3);
// Define all of the runnables used in this test
// runnable to create accessors
SerializableRunnable createAccessor = new SerializableRunnable("createAccessor") {
public void run() {
Cache cache = getCache();
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
paf.setLocalMaxMemory(0);
af.setPartitionAttributes(paf.create());
af.setDataPolicy(DataPolicy.PARTITION);
cache.createRegion(PR_REGION_NAME, af.create());
paf.setColocatedWith(PR_REGION_NAME);
af.setPartitionAttributes(paf.create());
cache.createRegion("region2", af.create());
}
};
// runnable to create PRs
SerializableRunnable createPRs = new SerializableRunnable("createPRs") {
public void run() {
Cache cache = getCache();
DiskStore ds = cache.findDiskStore("disk");
if (ds == null) {
ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
}
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
af.setPartitionAttributes(paf.create());
af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
af.setDiskStoreName("disk");
cache.createRegion(PR_REGION_NAME, af.create());
paf.setColocatedWith(PR_REGION_NAME);
af.setPartitionAttributes(paf.create());
cache.createRegion("region2", af.create());
}
};
// runnable to close the cache.
SerializableRunnable closeCache = new SerializableRunnable("closeCache") {
public void run() {
closeCache();
}
};
// Runnable to do a bunch of operations, handling the exceptions thrown
// while a member is offline.
SerializableRunnable doABunchOfPuts = new SerializableRunnable("doABunchOfPuts") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion(PR_REGION_NAME);
try {
for (int i = 0; ; i++) {
try {
region.get(i % NUM_BUCKETS);
} catch (PartitionOfflineException expected) {
// do nothing.
} catch (PartitionedRegionStorageException expected) {
// do nothing.
}
Thread.yield();
}
} catch (CacheClosedException expected) {
// ok, we're done.
}
}
};
// Runnable to clean up disk dirs on a member
SerializableRunnable cleanDiskDirs = new SerializableRunnable("Clean disk dirs") {
public void run() {
try {
cleanDiskDirs();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
};
// Create the PR on two members
vm1.invoke(createPRs);
vm2.invoke(createPRs);
// create the accessor.
vm0.invoke(createAccessor);
// Create some buckets.
createData(vm0, 0, NUM_BUCKETS, "a");
createData(vm0, 0, NUM_BUCKETS, "a", "region2");
// backup the system. We use this to get a snapshot of vm1 and vm2
// while they are both online. Recovering from this backup simulates
// a simultaneous kill and recovery.
backup(vm3);
// close vm1 and vm2.
vm1.invoke(closeCache);
vm2.invoke(closeCache);
// restore the backup
vm1.invoke(cleanDiskDirs);
vm2.invoke(cleanDiskDirs);
restoreBackup(2);
// in vm0, start doing a bunch of concurrent puts.
AsyncInvocation async0 = vm0.invokeAsync(doABunchOfPuts);
// This recovery should not hang (that's what we're testing for here).
AsyncInvocation async1 = vm1.invokeAsync(createPRs);
AsyncInvocation async2 = vm2.invokeAsync(createPRs);
async1.getResult(MAX_WAIT);
async2.getResult(MAX_WAIT);
// close the cache in vm0 to stop the async puts.
vm0.invoke(closeCache);
// make sure we didn't get an exception
async0.getResult(MAX_WAIT);
}
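The doABunchOfPuts runnable shows the exception-handling pattern for operating on a partitioned region while its persistent members are offline or restarting: swallow PartitionOfflineException and PartitionedRegionStorageException as transient, and treat CacheClosedException as the stop signal. A hypothetical standalone helper using the same pattern (the class name and key scheme are ours, and it does puts where the test's loop does gets):

import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.PartitionedRegionStorageException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.persistence.PartitionOfflineException;

public final class TolerantWriter {
  public static void putUntilCacheCloses(Region<Integer, String> region, int keys) {
    try {
      for (int i = 0;; i++) {
        try {
          region.put(i % keys, "value-" + i);
        } catch (PartitionOfflineException e) {
          // The bucket's persistent members are offline; retry on the next pass.
        } catch (PartitionedRegionStorageException e) {
          // No data store could host the bucket yet; retry.
        }
        Thread.yield();
      }
    } catch (CacheClosedException e) {
      // Cache shut down: we're done.
    }
  }
}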
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class PersistentColocatedPartitionedRegionDUnitTest, method testColocatedPRsRecoveryOnePRAtATime.
/**
* Tests what happens when we recreate colocated persistent PRs by creating one PR everywhere and
* then the other PR everywhere.
*/
@Test
public void testColocatedPRsRecoveryOnePRAtATime() throws Throwable {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
public void run() {
Cache cache = getCache();
DiskStore ds = cache.findDiskStore("disk");
if (ds == null) {
ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
}
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
af.setPartitionAttributes(paf.create());
af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
af.setDiskStoreName("disk");
cache.createRegion(PR_REGION_NAME, af.create());
}
};
SerializableRunnable createChildPR = getCreateChildPRRunnable();
vm0.invoke(createParentPR);
vm1.invoke(createParentPR);
vm2.invoke(createParentPR);
vm0.invoke(createChildPR);
vm1.invoke(createChildPR);
vm2.invoke(createChildPR);
createData(vm0, 0, NUM_BUCKETS, "a");
createData(vm0, 0, NUM_BUCKETS, "b", "region2");
Set<Integer> vm0Buckets = getBucketList(vm0, PR_REGION_NAME);
assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
Set<Integer> vm1Buckets = getBucketList(vm1, PR_REGION_NAME);
assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
Set<Integer> vm2Buckets = getBucketList(vm2, PR_REGION_NAME);
assertEquals(vm2Buckets, getBucketList(vm2, "region2"));
Set<Integer> vm0PrimaryBuckets = getPrimaryBucketList(vm0, PR_REGION_NAME);
assertEquals(vm0PrimaryBuckets, getPrimaryBucketList(vm0, "region2"));
Set<Integer> vm1PrimaryBuckets = getPrimaryBucketList(vm1, PR_REGION_NAME);
assertEquals(vm1PrimaryBuckets, getPrimaryBucketList(vm1, "region2"));
Set<Integer> vm2PrimaryBuckets = getPrimaryBucketList(vm2, PR_REGION_NAME);
assertEquals(vm2PrimaryBuckets, getPrimaryBucketList(vm2, "region2"));
closeCache(vm0);
closeCache(vm1);
closeCache(vm2);
AsyncInvocation async0 = vm0.invokeAsync(createParentPR);
AsyncInvocation async1 = vm1.invokeAsync(createParentPR);
AsyncInvocation async2 = vm2.invokeAsync(createParentPR);
async0.getResult(MAX_WAIT);
async1.getResult(MAX_WAIT);
async2.getResult(MAX_WAIT);
vm0.invoke(createChildPR);
vm1.invoke(createChildPR);
vm2.invoke(createChildPR);
Wait.pause(4000);
assertEquals(vm0Buckets, getBucketList(vm0, PR_REGION_NAME));
assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
assertEquals(vm1Buckets, getBucketList(vm1, PR_REGION_NAME));
assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
assertEquals(vm2Buckets, getBucketList(vm2, PR_REGION_NAME));
assertEquals(vm2Buckets, getBucketList(vm2, "region2"));
// primary can differ
vm0PrimaryBuckets = getPrimaryBucketList(vm0, PR_REGION_NAME);
assertEquals(vm0PrimaryBuckets, getPrimaryBucketList(vm0, "region2"));
vm1PrimaryBuckets = getPrimaryBucketList(vm1, PR_REGION_NAME);
assertEquals(vm1PrimaryBuckets, getPrimaryBucketList(vm1, "region2"));
vm2PrimaryBuckets = getPrimaryBucketList(vm2, PR_REGION_NAME);
assertEquals(vm2PrimaryBuckets, getPrimaryBucketList(vm2, "region2"));
checkData(vm0, 0, NUM_BUCKETS, "a");
// region2 didn't have persistent data, so nothing should be recovered
checkData(vm0, 0, NUM_BUCKETS, null, "region2");
// Make sure we can do a put in all of the buckets in vm2
createData(vm0, 0, NUM_BUCKETS, "c", "region2");
// Now all of those buckets should exist
checkData(vm0, 0, NUM_BUCKETS, "c", "region2");
// Now all the buckets should be restored in the appropriate places.
assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
assertEquals(vm1Buckets, getBucketList(vm1, "region2"));
assertEquals(vm2Buckets, getBucketList(vm2, "region2"));
}
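getBucketList and getPrimaryBucketList are DUnit test helpers, not public API. For application code, a rough equivalent view of locally hosted versus locally primary data is available through PartitionRegionHelper; a sketch (the reporting class is hypothetical):

import org.apache.geode.cache.Region;
import org.apache.geode.cache.partition.PartitionRegionHelper;

public final class BucketInspection {
  public static void report(Region<?, ?> region) {
    // All data hosted on this member (primary plus redundant copies).
    Region<?, ?> local = PartitionRegionHelper.getLocalData(region);
    // Only the entries for which this member is primary.
    Region<?, ?> primary = PartitionRegionHelper.getLocalPrimaryData(region);
    System.out.println(region.getName() + ": local=" + local.size()
        + " primary=" + primary.size());
  }
}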
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class PersistentColocatedPartitionedRegionDUnitTest, method testCrashDuringRedundancySatisfaction.
/**
* Test what happens when we crash in the middle of satisfying redundancy for a colocated bucket.
*
* @throws Throwable
*/
// This test method is disabled because it fails periodically, causing
// cruise control failures. See bug #46748.
@Test
public void testCrashDuringRedundancySatisfaction() throws Throwable {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
SerializableRunnable createPRs = new SerializableRunnable("region1") {
public void run() {
Cache cache = getCache();
DiskStore ds = cache.findDiskStore("disk");
if (ds == null) {
ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
}
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
// Workaround for bug 44414: disable recovery delay so we shut down
// vm1 at a predictable point.
paf.setRecoveryDelay(-1);
paf.setStartupRecoveryDelay(-1);
af.setPartitionAttributes(paf.create());
af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
af.setDiskStoreName("disk");
cache.createRegion(PR_REGION_NAME, af.create());
paf.setColocatedWith(PR_REGION_NAME);
af.setPartitionAttributes(paf.create());
cache.createRegion("region2", af.create());
}
};
// Create the PR on vm0
vm0.invoke(createPRs);
// Create some buckets.
createData(vm0, 0, NUM_BUCKETS, "a");
createData(vm0, 0, NUM_BUCKETS, "a", "region2");
vm1.invoke(createPRs);
// We shouldn't have created any buckets in vm1 yet.
assertEquals(Collections.emptySet(), getBucketList(vm1));
// Add an observer that will disconnect before allowing the peer to
// GII a colocated bucket. This should leave the peer with only the parent
// bucket.
vm0.invoke(new SerializableRunnable() {
public void run() {
DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
@Override
public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
if (message instanceof RequestImageMessage) {
if (((RequestImageMessage) message).regionPath.contains("region2")) {
DistributionMessageObserver.setInstance(null);
disconnectFromDS();
}
}
}
});
}
});
IgnoredException ex = IgnoredException.addIgnoredException("PartitionOfflineException", vm1);
try {
// Do a rebalance to create buckets in vm1. This will cause vm0 to disconnect
// as we satisfy redundancy with vm1.
try {
RebalanceResults rr = rebalance(vm1);
} catch (Exception expected) {
// We expect the rebalance to fail because vm0 will disconnect.
if (!(expected.getCause() instanceof PartitionOfflineException)) {
throw expected;
}
}
// Wait for vm0 to be closed by the callback
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Wait.waitForCriterion(new WaitCriterion() {
public boolean done() {
InternalDistributedSystem ds = basicGetSystem();
return ds == null || !ds.isConnected();
}
public String description() {
return "DS did not disconnect";
}
}, MAX_WAIT, 100, true);
return null;
}
});
// close the cache in vm1
SerializableCallable disconnectFromDS = new SerializableCallable() {
public Object call() throws Exception {
disconnectFromDS();
return null;
}
};
vm1.invoke(disconnectFromDS);
// Make sure vm0 is disconnected. This avoids a race where vm0 may still be in
// the process of disconnecting even though our async listener found that the
// system was disconnected.
vm0.invoke(disconnectFromDS);
} finally {
ex.remove();
}
// Create the cache and PRs on both members
AsyncInvocation async0 = vm0.invokeAsync(createPRs);
AsyncInvocation async1 = vm1.invokeAsync(createPRs);
async0.getResult(MAX_WAIT);
async1.getResult(MAX_WAIT);
// Make sure the data was recovered correctly
checkData(vm0, 0, NUM_BUCKETS, "a");
// Workaround for bug 46748.
checkData(vm0, 0, NUM_BUCKETS, "a", "region2");
}
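rebalance(vm1) is a test helper; the underlying operation is the public ResourceManager rebalance API, which creates the redundant and colocated bucket copies this test crashes in the middle of. A minimal sketch of what the helper wraps (the class name is hypothetical):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.control.RebalanceFactory;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;

public final class RebalanceSketch {
  public static RebalanceResults rebalance(Cache cache) throws InterruptedException {
    RebalanceFactory factory = cache.getResourceManager().createRebalanceFactory();
    RebalanceOperation op = factory.start();
    // Blocks until the rebalance completes; redundancy for colocated regions
    // is satisfied bucket by bucket, which is the window this test crashes in.
    return op.getResults();
  }
}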