Use of org.apache.geode.internal.cache.execute.data.Order in project geode by apache: the class PartitionResolverDUnitTest, method createRegion.
/**
 * Creates the colocated CUSTOMER and ORDER partitioned regions used by the test.
 *
 * <p>Note: the original version constructed an AttributesFactory, set
 * DISTRIBUTED_ACK scope on it, and then immediately discarded it by
 * reassignment — that dead factory has been removed.
 *
 * @param isAccessor when {@code true} the regions are created as accessors
 *        (localMaxMemory = 0); otherwise as data stores (localMaxMemory = 1)
 * @param redundantCopies number of redundant copies for both partitioned regions
 */
void createRegion(boolean isAccessor, int redundantCopies) {
  AttributesFactory af = new AttributesFactory();
  // CUSTOMER region: uses a counting resolver so the test can verify how many
  // times the partition resolver is invoked.
  af.setPartitionAttributes(new PartitionAttributesFactory<CustId, Customer>()
      .setTotalNumBuckets(4)
      .setLocalMaxMemory(isAccessor ? 0 : 1)
      .setPartitionResolver(new CountingResolver("CountingResolverCust"))
      .setRedundantCopies(redundantCopies)
      .create());
  getCache().createRegion(CUSTOMER, af.create());
  // ORDER region: colocated with CUSTOMER so related entries land on the same members.
  af.setPartitionAttributes(new PartitionAttributesFactory<OrderId, Order>()
      .setTotalNumBuckets(4)
      .setLocalMaxMemory(isAccessor ? 0 : 1)
      .setPartitionResolver(new CountingResolver("CountingResolverOrder"))
      .setRedundantCopies(redundantCopies)
      .setColocatedWith(CUSTOMER)
      .create());
  getCache().createRegion(ORDER, af.create());
}
Use of org.apache.geode.internal.cache.execute.data.Order in project geode by apache: the class DistributedTransactionDUnitTest, method testRemoveAllWithTransactions.
/**
 * Verifies removeAll semantics inside distributed transactions:
 * a rolled-back removeAll leaves the regions untouched, while a committed
 * removeAll removes exactly the supplied keys.
 */
@Test
public void testRemoveAllWithTransactions() throws Exception {
  Host host = Host.getHost(0);
  VM server1 = host.getVM(0);
  VM server2 = host.getVM(1);
  VM server3 = host.getVM(2);
  createRegions(new VM[] { server1, server2, server3 });
  execute(server1, new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      // Parameterized region/map types instead of the original raw types.
      Region<CustId, Customer> custRegion = getCache().getRegion(CUSTOMER_PR);
      Region<OrderId, Order> orderRegion = getCache().getRegion(ORDER_PR);
      Map<CustId, Customer> custMap = new HashMap<CustId, Customer>();
      Map<OrderId, Order> orderMap = new HashMap<OrderId, Order>();
      // Seed 15 customer/order pairs.
      for (int i = 0; i < 15; i++) {
        CustId custId = new CustId(i);
        Customer customer = new Customer("customer" + i, "address" + i);
        OrderId orderId = new OrderId(i, custId);
        Order order = new Order("order" + i);
        custMap.put(custId, customer);
        orderMap.put(orderId, order);
      }
      CacheTransactionManager mgr = getGemfireCache().getTxManager();
      mgr.setDistributed(true);
      // Commit the initial population inside a distributed transaction.
      mgr.begin();
      custRegion.putAll(custMap);
      orderRegion.putAll(orderMap);
      mgr.commit();
      mgr.begin();
      assertEquals(15, custRegion.size());
      assertEquals(15, orderRegion.size());
      // Build the 5-key subset [5, 10) that will be removed.
      custMap = new HashMap<CustId, Customer>();
      orderMap = new HashMap<OrderId, Order>();
      for (int i = 5; i < 10; i++) {
        CustId custId = new CustId(i);
        Customer customer = new Customer("customer" + i, "address" + i);
        OrderId orderId = new OrderId(i, custId);
        Order order = new Order("order" + i);
        custMap.put(custId, customer);
        orderMap.put(orderId, order);
      }
      // removeAll followed by rollback must leave all 15 entries intact.
      custRegion.removeAll(custMap.keySet());
      orderRegion.removeAll(orderMap.keySet());
      mgr.rollback();
      mgr.begin();
      assertEquals(15, custRegion.size());
      assertEquals(15, orderRegion.size());
      // removeAll followed by commit must remove exactly the 5 keys.
      custRegion.removeAll(custMap.keySet());
      orderRegion.removeAll(orderMap.keySet());
      assertEquals(10, custRegion.size());
      assertEquals(10, orderRegion.size());
      mgr.commit();
      assertEquals(10, custRegion.size());
      assertEquals(10, orderRegion.size());
      return null;
    }
  });
}
Use of org.apache.geode.internal.cache.execute.data.Order in project geode by apache: the class DistributedTransactionDUnitTest, method createRegions.
/**
 * Creates the persistent regions used by the distributed-transaction tests:
 * a persistent replicate D_REFERENCE region plus the colocated CUSTOMER_PR
 * and ORDER_PR persistent partitioned regions.
 *
 * <p>Note: the original version set PERSISTENT_PARTITION a second time before
 * creating ORDER_PR; that redundant call has been removed (the data policy set
 * for CUSTOMER_PR is still in effect on the same factory).
 *
 * @param accessor when {@code true} the partitioned regions are created as
 *        accessors (localMaxMemory = 0); otherwise as data stores
 * @param redundantCopies number of redundant copies for both partitioned regions
 * @param interestPolicy optional subscription interest policy; ignored when null
 */
void createRegions(boolean accessor, int redundantCopies, InterestPolicy interestPolicy) {
  AttributesFactory af = new AttributesFactory();
  af.setScope(Scope.DISTRIBUTED_ACK);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  // af.setConcurrencyChecksEnabled(getConcurrencyChecksEnabled());
  getCache().createRegion(D_REFERENCE, af.create());
  af = new AttributesFactory();
  // af.setConcurrencyChecksEnabled(getConcurrencyChecksEnabled());
  af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
  if (interestPolicy != null) {
    af.setSubscriptionAttributes(new SubscriptionAttributes(interestPolicy));
  }
  af.setPartitionAttributes(new PartitionAttributesFactory<CustId, Customer>()
      .setTotalNumBuckets(4)
      .setLocalMaxMemory(accessor ? 0 : 1)
      .setPartitionResolver(new CustomerIDPartitionResolver("resolver1"))
      .setRedundantCopies(redundantCopies)
      .create());
  getCache().createRegion(CUSTOMER_PR, af.create());
  // ORDER_PR is colocated with CUSTOMER_PR; the factory keeps the
  // PERSISTENT_PARTITION data policy set above.
  af.setPartitionAttributes(new PartitionAttributesFactory<OrderId, Order>()
      .setTotalNumBuckets(4)
      .setLocalMaxMemory(accessor ? 0 : 1)
      .setPartitionResolver(new CustomerIDPartitionResolver("resolver2"))
      .setRedundantCopies(redundantCopies)
      .setColocatedWith(CUSTOMER_PR)
      .create());
  getCache().createRegion(ORDER_PR, af.create());
}
Use of org.apache.geode.internal.cache.execute.data.Order in project geode by apache: the class PRTransactionDUnitTest, method testPRTXPerformance.
/**
 * Measures and logs the performance difference between running colocated
 * order/shipment updates through a plain function ("perfFunction") versus a
 * transactional function ("perfTxFunction") on the CUSTOMER partitioned region.
 *
 * <p>Fixes over the original: the two near-identical 35-line anonymous
 * callables are deduplicated into {@link #buildPerfRunner}; deprecated
 * {@code new Long(...)} boxing is replaced with autoboxing; the stray
 * double '+' in the final log concatenation is removed. All runtime strings
 * are preserved byte-for-byte.
 */
@Test
public void testPRTXPerformance() throws Exception {
  defaultStringSize = 1024;
  createPopulateAndVerifyCoLocatedPRs(1);
  // Register the perf functions on every member.
  SerializableCallable registerPerfFunctions = new SerializableCallable("register Fn") {
    public Object call() throws Exception {
      Function perfFunction = new PerfFunction();
      FunctionService.registerFunction(perfFunction);
      Function perfTxFunction = new PerfTxFunction();
      FunctionService.registerFunction(perfTxFunction);
      return Boolean.TRUE;
    }
  };
  dataStore1.invoke(registerPerfFunctions);
  dataStore2.invoke(registerPerfFunctions);
  dataStore3.invoke(registerPerfFunctions);
  accessor.invoke(registerPerfFunctions);
  // Time the non-transactional and transactional runs on the accessor.
  Long perfTime = (Long) accessor.invoke(
      buildPerfRunner("runPerfFunction", "running perfFunction", "perfFunction"));
  Long perfTxTime = (Long) accessor.invoke(
      buildPerfRunner("runPerfTxFunction", "Running perfFunction", "perfTxFunction"));
  double diff = (perfTime.longValue() - perfTxTime.longValue()) * 1.0;
  double percentDiff = (diff / perfTime.longValue()) * 100;
  LogWriterUtils.getLogWriter()
      .info((totalIterations - warmupIterations) + " iterations of function took:"
          + perfTime.longValue() + " Nanos, and transaction function took:"
          + perfTxTime.longValue() + " Nanos, difference :" + diff
          + " percentDifference:" + percentDiff);
}

/**
 * Builds a callable that, for each iteration, updates order/shipment pairs for
 * one customer via the named registered function and accumulates the elapsed
 * nanoseconds for the iterations after the warmup phase.
 *
 * @param callableName label for the SerializableCallable (preserved from original)
 * @param logMessage per-iteration log line (preserved byte-for-byte from original)
 * @param functionId id of the registered function to execute
 * @return a callable returning the total post-warmup time in nanoseconds as a Long
 */
private SerializableCallable buildPerfRunner(final String callableName,
    final String logMessage, final String functionId) {
  return new SerializableCallable(callableName) {
    public Object call() throws Exception {
      long perfTime = 0;
      Region customerPR = basicGetCache().getRegion(CustomerPartitionedRegionName);
      Execution e = FunctionService.onRegion(customerPR);
      // For each customer, update order and shipment.
      for (int iterations = 1; iterations <= totalIterations; iterations++) {
        LogWriterUtils.getLogWriter().info(logMessage);
        long startTime = 0;
        ArrayList args = new ArrayList();
        CustId custId = new CustId(iterations % 10);
        for (int i = 1; i <= perfOrderShipmentPairs; i++) {
          OrderId orderId = new OrderId(custId.getCustId().intValue() * 10 + i, custId);
          Order order = new Order("NewOrder" + i + iterations);
          ShipmentId shipmentId =
              new ShipmentId(orderId.getOrderId().intValue() * 10 + i, orderId);
          Shipment shipment = new Shipment("newShipment" + i + iterations);
          args.add(orderId);
          args.add(order);
          args.add(shipmentId);
          args.add(shipment);
        }
        Set filter = new HashSet();
        filter.add(custId);
        // Start timing only after the warmup iterations.
        if (iterations > warmupIterations) {
          startTime = NanoTimer.getTime();
        }
        e.withFilter(filter).setArguments(args).execute(functionId).getResult();
        if (startTime > 0) {
          perfTime += NanoTimer.getTime() - startTime;
        }
      }
      // Autoboxing instead of the deprecated new Long(...) constructor.
      return perfTime;
    }
  };
}
Use of org.apache.geode.internal.cache.execute.data.Order in project geode by apache: the class PRTransactionDUnitTest, method testCacheListenerCallbacks.
/**
 * Registers a TransactionListener2 cache listener on the CUSTOMER partitioned
 * region on every member, then runs a transactional function with the
 * VERIFY_LISTENER_CALLBACK operation to verify listener callbacks fire.
 *
 * <p>Cleanups over the original: removed the unused locals {@code caughtException}
 * (assigned twice, never read) and {@code orderpr} (looked up, never used);
 * removed redundant {@code clear()} calls on freshly created empty collections;
 * replaced deprecated {@code new Integer(...)} with autoboxing.
 */
@Ignore("BUG46661")
@Test
public void testCacheListenerCallbacks() {
  createPopulateAndVerifyCoLocatedPRs(1);
  SerializableCallable registerListeners = new SerializableCallable() {
    public Object call() throws Exception {
      Region custRegion =
          basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
      custRegion.getAttributesMutator().addCacheListener(new TransactionListener2());
      return null;
    }
  };
  accessor.invoke(registerListeners);
  dataStore1.invoke(registerListeners);
  dataStore2.invoke(registerListeners);
  dataStore3.invoke(registerListeners);
  accessor.invoke(new SerializableCallable("run function") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) basicGetCache()
          .getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
      CustId custId = new CustId(2);
      Customer newCus = new Customer("foo", "bar");
      Order order = new Order("fooOrder");
      OrderId orderId = new OrderId(22, custId);
      Function txFunction = new MyTransactionFunction();
      FunctionService.registerFunction(txFunction);
      Execution e = FunctionService.onRegion(pr);
      // Build the argument list: operation code followed by the entries to verify.
      ArrayList args = new ArrayList();
      args.add(VERIFY_LISTENER_CALLBACK); // autoboxed; was new Integer(...)
      LogWriterUtils.getLogWriter().info("VERIFY_LISTENER_CALLBACK");
      args.add(custId);
      args.add(newCus);
      args.add(orderId);
      args.add(order);
      // Route the function to the member hosting custId's bucket.
      Set filter = new HashSet();
      filter.add(custId);
      e.withFilter(filter).setArguments(args).execute(txFunction.getId()).getResult();
      return null;
    }
  });
}
Aggregations