Use of org.apache.geode.test.dunit.SerializableCallable in project geode by apache.
The class ClusterConfigurationServiceEndToEndDUnitTest, method shutdownAll.
private void shutdownAll() throws IOException {
  VM locatorAndMgr = getHost(0).getVM(3);
  locatorAndMgr.invoke(new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      // Request a cluster-wide shutdown through the distribution manager
      InternalCache cache = (InternalCache) CacheFactory.getAnyInstance();
      ShutdownAllRequest.send(cache.getInternalDistributedSystem().getDistributionManager(), -1);
      return null;
    }
  });
  locatorAndMgr.invoke(SharedConfigurationTestUtils.cleanupLocator);
  // Clean up the working directories created for each server
  if (!serverNames.isEmpty()) {
    for (String serverName : serverNames) {
      final File serverDir = new File(serverName);
      FileUtils.cleanDirectory(serverDir);
      FileUtils.deleteDirectory(serverDir);
    }
  }
  serverNames.clear();
}
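All of the examples on this page share one pattern: a SerializableCallable is serialized to a remote dUnit VM and executed there via VM.invoke. A minimal sketch of that pattern (the VM index and return value are illustrative):

VM vm = Host.getHost(0).getVM(0); // illustrative VM index
Object memberId = vm.invoke(new SerializableCallable() {
  @Override
  public Object call() throws Exception {
    // Runs inside the remote test VM; any captured state must be serializable
    return CacheFactory.getAnyInstance().getDistributedSystem().getDistributedMember().getId();
  }
});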
Use of org.apache.geode.test.dunit.SerializableCallable in project geode by apache.
The class CopyOnReadIndexDUnitTest, method helpTestPRQueryOnLocalNode.
// The test sets up a partition region across two servers.
// It does puts in each server, checking instance counts of Portfolio objects.
// Querying the data will result in deserialization of Portfolio objects.
// In cases where an index is present, the objects will be deserialized in the cache.
public void helpTestPRQueryOnLocalNode(final String queryString, final int numPortfolios,
    final int numExpectedResults, final boolean hasIndex) throws Exception {
  final int numPortfoliosPerVM = numPortfolios / 2;
  resetInstanceCount(vm0);
  resetInstanceCount(vm1);
  createPartitionRegion(vm0, "portfolios");
  createPartitionRegion(vm1, "portfolios");

  if (hasIndex) {
    vm0.invoke(new SerializableCallable() {
      public Object call() throws Exception {
        QueryTestUtils utils = new QueryTestUtils();
        utils.createIndex("idIndex", "p.ID", "/portfolios p");
        return null;
      }
    });
  }

  vm0.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      for (int i = 0; i < numPortfoliosPerVM; i++) {
        Portfolio p = new Portfolio(i);
        p.status = "testStatus";
        p.positions = new HashMap();
        p.positions.put("" + i, new Position("" + i, 20));
        region.put("key " + i, p);
      }
      if (hasIndex) {
        // Operations done on this vm so far: numPortfoliosPerVM instances of Portfolio
        // created for the puts. Due to the index, we have deserialized all of the entries
        // this vm currently hosts.
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // Operations done on this vm so far: numPortfoliosPerVM instances of Portfolio
        // created for the puts. Without an index, we have not deserialized any values.
        Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });

  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      for (int i = numPortfoliosPerVM; i < numPortfolios; i++) {
        Portfolio p = new Portfolio(i);
        p.status = "testStatus";
        p.positions = new HashMap();
        p.positions.put("" + i, new Position("" + i, 20));
        region.put("key " + i, p);
      }
      // PR indexes are created across nodes, unlike replicated-region indexes
      if (hasIndex) {
        // Operations done on this vm so far: numPortfoliosPerVM instances of Portfolio
        // created for the puts. Due to the index, we have deserialized all of the entries
        // this vm currently hosts.
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        if (index == null) {
          QueryTestUtils utils = new QueryTestUtils();
          index = utils.createIndex("idIndex", "p.ID", "/portfolios p");
        }
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // Without an index, we have not deserialized any values.
        Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });

  vm0.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      QueryService qs = getCache().getQueryService();
      Query query = qs.newQuery(queryString);
      SelectResults results = (SelectResults) query.execute();
      assertEquals("Failed:" + queryString, numExpectedResults, results.size());
      // Mutate each result; copy-on-read means this must not affect the cached values
      for (Object o : results) {
        if (o instanceof Portfolio) {
          Portfolio p = (Portfolio) o;
          p.status = "discardStatus";
        } else {
          Struct struct = (Struct) o;
          Portfolio p = (Portfolio) struct.getFieldValues()[0];
          p.status = "discardStatus";
        }
      }
      if (hasIndex) {
        // Due to the index, the entries this vm hosts are already deserialized and cached,
        // so we only add the number of results copied because of copy-on-read.
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM + numExpectedResults), 5000, 200, true);
      } else {
        // The query deserialized the entries this vm currently hosts, and iterating the
        // results also deserialized the results from the other data nodes as well as our own.
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) ((PartitionedRegion) region).getLocalSize() + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });

  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      if (hasIndex) {
        // After vm0 executed the query, the values were already deserialized in our cache,
        // so the total is unchanged.
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) ((PartitionedRegion) region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // After vm0 executed the query, we had to deserialize the values in our vm.
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) ((PartitionedRegion) region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });

  vm0.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      QueryService qs = getCache().getQueryService();
      Query query = qs.newQuery(queryString);
      SelectResults results = (SelectResults) query.execute();
      assertEquals(numExpectedResults, results.size());
      // The copies mutated by the first query were discarded; the cached values must be intact
      for (Object o : results) {
        if (o instanceof Portfolio) {
          Portfolio p = (Portfolio) o;
          assertEquals("status should not have been changed", "testStatus", p.status);
        } else {
          Struct struct = (Struct) o;
          Portfolio p = (Portfolio) struct.getFieldValues()[0];
          assertEquals("status should not have been changed", "testStatus", p.status);
        }
      }
      if (hasIndex) {
        // Second query: the index keeps the entries deserialized and cached, so we only add
        // the number of copy-on-read results a second time.
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) index.getStatistics().getNumberOfValues() + numExpectedResults + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // Second query: with no index, we again deserialize all of the values this vm hosts,
        // and again copy the results due to copy-on-read.
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) (((PartitionedRegion) region).getLocalSize() + ((PartitionedRegion) region).getLocalSize() + numExpectedResults + numExpectedResults + numPortfoliosPerVM)), 5000, 200, true);
      }
      return null;
    }
  });
destroyRegion("portfolio", vm0);
}
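The verifyPortfolioCount helper is not shown in this excerpt. Since it is polled via Wait.waitForCriterion, it presumably returns a WaitCriterion comparing an instance counter against the expected value. A hypothetical reconstruction, assuming Portfolio maintains a static instanceCount bumped on construction and deserialization (the real helper may differ):

private WaitCriterion verifyPortfolioCount(final int expected) {
  return new WaitCriterion() {
    private int actual;

    @Override
    public boolean done() {
      actual = Portfolio.instanceCount; // assumed static counter on the test domain class
      return actual == expected;
    }

    @Override
    public String description() {
      return "expected " + expected + " Portfolio instances but found " + actual;
    }
  };
}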
Use of org.apache.geode.test.dunit.SerializableCallable in project geode by apache.
The class ParallelSnapshotDUnitTest, method loadCache.
public void loadCache() throws Exception {
  SerializableCallable setup = new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      CacheFactory cf = new CacheFactory().setPdxSerializer(new MyPdxSerializer());
      Cache cache = getCache(cf);
      RegionGenerator rgen = new RegionGenerator();
      rgen.createRegion(cache, null, RegionType.PARTITION, "test");
      return null;
    }
  };
  forEachVm(setup, true);
}
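forEachVm is a helper defined elsewhere in these snapshot tests; from its call sites it runs a callable in every dUnit VM. A minimal sketch under that assumption (taking the boolean flag to mean "also run in the controller JVM", which is a guess):

public static void forEachVm(SerializableCallable call, boolean includeController) throws Exception {
  Host host = Host.getHost(0);
  int vmCount = host.getVMCount();
  for (int i = 0; i < vmCount; i++) {
    host.getVM(i).invoke(call);
  }
  if (includeController) {
    call.call(); // run locally in the test controller JVM as well
  }
}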
Use of org.apache.geode.test.dunit.SerializableCallable in project geode by apache.
The class ParallelSnapshotDUnitTest, method doExport.
private void doExport(boolean explode) throws Exception {
  Region region = getCache().getRegion("test");
  for (int i = 0; i < 1000; i++) {
    // ffff is a test payload field defined elsewhere in this test class
    region.put(i, ffff);
  }
  RegionSnapshotService rss = region.getSnapshotService();
  final TestSnapshotFileMapper mapper = new TestSnapshotFileMapper();
  mapper.explode = explode;
  SnapshotOptionsImpl opt = (SnapshotOptionsImpl) rss.createOptions();
  opt.setParallelMode(true);
  opt.setMapper(mapper);
  final File f = new File("mysnap");
  rss.save(f, SnapshotFormat.GEMFIRE, opt);
  mapper.explode = false;
  SerializableCallable check = new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      // Verify this member's slice of the parallel export landed where the mapper says
      File snap = mapper.mapExportPath(getCache().getDistributedSystem().getDistributedMember(), f);
      assertTrue("Could not find snapshot: " + snap, snap.exists());
      return null;
    }
  };
  forEachVm(check, true);
}
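TestSnapshotFileMapper is a test helper not shown in this excerpt. Judging from its use above, it implements Geode's SnapshotFileMapper so that each member's parallel export lands in a distinct, predictable file. A rough sketch under those assumptions (the explode flag's behavior is omitted; the real helper may differ):

static class TestSnapshotFileMapper implements SnapshotFileMapper {
  volatile boolean explode; // explode (zip/unzip) handling omitted in this sketch

  @Override
  public File mapExportPath(DistributedMember member, File snapshot) {
    // Suffix the snapshot path with the member id so each member writes its own file
    return new File(snapshot.getAbsolutePath() + "-" + sanitize(member.getId()));
  }

  @Override
  public File[] mapImportPath(DistributedMember member, File snapshot) {
    return new File[] { mapExportPath(member, snapshot) };
  }

  private String sanitize(String memberId) {
    // Member ids contain characters (colons, brackets) that are not filesystem-safe
    return memberId.replaceAll("[^0-9A-Za-z.-]", "_");
  }
}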
Use of org.apache.geode.test.dunit.SerializableCallable in project geode by apache.
The class SnapshotByteArrayDUnitTest, method testImportByteArray.
@Test
public void testImportByteArray() throws Exception {
  SerializableCallable load = new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      // snap is a File field defined elsewhere in this test class
      Region region = getCache().getRegion("snapshot-ops");
      for (int i = 0; i < 1000; i++) {
        region.put(i, new byte[] { 0xf });
      }
      region.getSnapshotService().save(snap, SnapshotFormat.GEMFIRE);
      region.getSnapshotService().load(snap, SnapshotFormat.GEMFIRE);
      return null;
    }
  };
  Host.getHost(0).getVM(1).invoke(load);

  SerializableCallable callback = new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      Region region = getCache().getRegion("snapshot-ops");
      // Log every operation except creates so update/invalidate/destroy traffic is visible
      region.getAttributesMutator().addCacheListener(new CacheListenerAdapter<Integer, Object>() {
        @Override
        public void afterUpdate(EntryEvent<Integer, Object> event) {
          dump(event);
        }

        @Override
        public void afterInvalidate(EntryEvent<Integer, Object> event) {
          dump(event);
        }

        @Override
        public void afterDestroy(EntryEvent<Integer, Object> event) {
          dump(event);
        }

        @Override
        public void afterCreate(EntryEvent<Integer, Object> event) {
        }

        private void dump(EntryEvent<Integer, Object> event) {
          LogWriterUtils.getLogWriter().info("op = " + event.getOperation());
          LogWriterUtils.getLogWriter().info("new = " + event.getNewValue());
          LogWriterUtils.getLogWriter().info("old = " + event.getOldValue());
        }
      });
      return null;
    }
  };
  SnapshotDUnitTest.forEachVm(callback, true);

  Region region = getCache().getRegion("snapshot-ops");
  for (int i = 0; i < 1000; i++) {
    region.put(i, new byte[] { 0x0, 0x1, 0x3 });
    region.invalidate(i);
    region.destroy(i);
  }
}
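Stripped of the dUnit harness, the snapshot round trip these tests exercise is just a save followed by a load on RegionSnapshotService. A standalone sketch using only the public API seen above (the region name and file path are illustrative):

import java.io.File;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.snapshot.RegionSnapshotService;
import org.apache.geode.cache.snapshot.SnapshotOptions.SnapshotFormat;

public class SnapshotRoundTrip {
  public static void main(String[] args) throws Exception {
    Cache cache = new CacheFactory().create();
    Region<Integer, byte[]> region =
        cache.<Integer, byte[]>createRegionFactory(RegionShortcut.REPLICATE).create("snapshot-ops");
    for (int i = 0; i < 1000; i++) {
      region.put(i, new byte[] { 0xf });
    }
    RegionSnapshotService<Integer, byte[]> svc = region.getSnapshotService();
    File snap = new File("snapshot-ops.gfd"); // illustrative file name
    svc.save(snap, SnapshotFormat.GEMFIRE); // export all entries
    svc.load(snap, SnapshotFormat.GEMFIRE); // re-import them over the existing keys
    cache.close();
  }
}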