Example 6 with LogWriter

Use of org.apache.geode.LogWriter in project geode by apache.

In the class TestFunction, the method execute2:

public void execute2(FunctionContext context) {
    if (context instanceof RegionFunctionContext) {
        RegionFunctionContext rfContext = (RegionFunctionContext) context;
        rfContext.getDataSet().getCache().getLogger().info("Executing function :  TestFunction2.execute " + rfContext);
        if (rfContext.getArguments() instanceof Boolean) {
            /* return rfContext.getArguments(); */
            if (hasResult()) {
                rfContext.getResultSender().lastResult((Serializable) rfContext.getArguments());
            } else {
                rfContext.getDataSet().getCache().getLogger().info("Executing function :  TestFunction2.execute " + rfContext);
                while (!rfContext.getDataSet().isDestroyed()) {
                    rfContext.getDataSet().getCache().getLogger().info("For Bug43513 ");
                    try {
                        Thread.sleep(100);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        return;
                    }
                }
            }
        } else if (rfContext.getArguments() instanceof String) {
            String key = (String) rfContext.getArguments();
            if (key.equals("TestingTimeOut")) {
                // PRFunctionExecutionDUnitTest#testRemoteMultiKeyExecution_timeout
                try {
                    Thread.sleep(2000);
                } catch (InterruptedException e) {
                    // Restore the interrupt status rather than swallowing it.
                    Thread.currentThread().interrupt();
                    rfContext.getDataSet().getCache().getLogger().warning("Got Exception : Thread Interrupted " + e);
                }
            }
            if (PartitionRegionHelper.isPartitionedRegion(rfContext.getDataSet())) {
                /* return (Serializable) PartitionRegionHelper.getLocalDataForContext(rfContext).get(key); */
                rfContext.getResultSender().lastResult((Serializable) PartitionRegionHelper.getLocalDataForContext(rfContext).get(key));
            } else {
                rfContext.getResultSender().lastResult((Serializable) rfContext.getDataSet().get(key));
            }
        /* return (Serializable)rfContext.getDataSet().get(key); */
        } else if (rfContext.getArguments() instanceof Set) {
            Set origKeys = (Set) rfContext.getArguments();
            ArrayList vals = new ArrayList();
            for (Object key : origKeys) {
                Object val = PartitionRegionHelper.getLocalDataForContext(rfContext).get(key);
                if (val != null) {
                    vals.add(val);
                }
            }
            rfContext.getResultSender().lastResult(vals);
        /* return vals; */
        } else if (rfContext.getArguments() instanceof HashMap) {
            HashMap putData = (HashMap) rfContext.getArguments();
            for (Iterator i = putData.entrySet().iterator(); i.hasNext(); ) {
                Map.Entry me = (Map.Entry) i.next();
                rfContext.getDataSet().put(me.getKey(), me.getValue());
            }
            rfContext.getResultSender().lastResult(Boolean.TRUE);
        } else {
            rfContext.getResultSender().lastResult(Boolean.FALSE);
        }
    } else {
        if (hasResult()) {
            context.getResultSender().lastResult(Boolean.FALSE);
        } else {
            DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
            LogWriter logger = ds.getLogWriter();
            logger.info("Executing in TestFunction on Server : " + ds.getDistributedMember() + "with Context : " + context);
            while (ds.isConnected()) {
                logger.fine("Just executing function in infinite loop for Bug43513");
                try {
                    Thread.sleep(250);
                } catch (InterruptedException e) {
                    return;
                }
            }
        }
    }
}
Also used : Serializable(java.io.Serializable) Set(java.util.Set) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) RegionFunctionContext(org.apache.geode.cache.execute.RegionFunctionContext) DistributedSystem(org.apache.geode.distributed.DistributedSystem) InternalDistributedSystem(org.apache.geode.distributed.internal.InternalDistributedSystem) LogWriter(org.apache.geode.LogWriter) Iterator(java.util.Iterator) Map(java.util.Map)
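
The excerpt shows only the server-side body. For context, a caller-side sketch is shown below; it assumes the function is registered under the id "TestFunction2" (the registration and id are not part of the excerpt) and uses the withArgs/execute calls of this geode generation:

// Hypothetical caller (not in the geode source above): runs the function on a
// region with a Boolean argument, which execute2 echoes back via its ResultSender.
import java.util.List;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

public class TestFunctionCaller {

    @SuppressWarnings("unchecked")
    public static List<Object> callOnRegion(Region<String, String> region) {
        Execution execution = FunctionService.onRegion(region).withArgs(Boolean.TRUE);
        // "TestFunction2" is an assumed registration id, used here for illustration.
        ResultCollector<?, ?> collector = execution.execute("TestFunction2");
        // Blocks until lastResult(...) has been received from every member.
        return (List<Object>) collector.getResult();
    }
}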

Example 7 with LogWriter

Use of org.apache.geode.LogWriter in project geode by apache.

In the class TestFunction, the method executeFunctionRunningForLongTime:

public void executeFunctionRunningForLongTime(FunctionContext context) {
    DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
    LogWriter logger = ds.getLogWriter();
    try {
        Thread.sleep(2000);
    } catch (InterruptedException e) {
        // Restore the interrupt status and record the interruption.
        Thread.currentThread().interrupt();
        logger.info("Exception in executeFunctionRunningForLongTime", e);
    }
    context.getResultSender().lastResult("Ran executeFunctionRunningForLongTime for 10000000");
}
Also used : LogWriter(org.apache.geode.LogWriter) DistributedSystem(org.apache.geode.distributed.DistributedSystem) InternalDistributedSystem(org.apache.geode.distributed.internal.InternalDistributedSystem)
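
Since the function replies only after sleeping, a test harness would normally bound its wait for the result. A minimal caller-side sketch, assuming a ResultCollector obtained from an Execution.execute(...) call as in the previous example:

import java.util.concurrent.TimeUnit;
import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.ResultCollector;

// Hypothetical helper: waits a bounded time for the slow function's result.
public static Object waitForSlowFunction(ResultCollector<?, ?> collector)
        throws FunctionException, InterruptedException {
    // The function above sleeps about 2 seconds before sending its single
    // result, so a 5 second ceiling leaves comfortable headroom.
    return collector.getResult(5, TimeUnit.SECONDS);
}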

Example 8 with LogWriter

Use of org.apache.geode.LogWriter in project geode by apache.

In the class TestFunction, the method executeFunctionReexecuteException:

private synchronized void executeFunctionReexecuteException(FunctionContext context) {
    retryCountForExecuteFunctionReexecuteException++;
    DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
    LogWriter logger = ds.getLogWriter();
    logger.fine("Executing executeException in TestFunction on Member : " + ds.getDistributedMember() + "with Context : " + context);
    if (retryCountForExecuteFunctionReexecuteException >= 5) {
        logger.fine("Tried Function Execution 5 times. Now Returning after 5 attempts");
        context.getResultSender().lastResult(Integer.valueOf(retryCountForExecuteFunctionReexecuteException));
        retryCountForExecuteFunctionReexecuteException = 0;
        return;
    }
    if (context.getArguments() instanceof Boolean) {
        logger.fine("MyFunctionExecutionException is intentionally thrown");
        throw new FunctionInvocationTargetException(new MyFunctionExecutionException("I have been thrown from TestFunction"));
    }
}
Also used : LogWriter(org.apache.geode.LogWriter) MyFunctionExecutionException(org.apache.geode.internal.cache.execute.MyFunctionExecutionException) FunctionInvocationTargetException(org.apache.geode.cache.execute.FunctionInvocationTargetException) InternalFunctionInvocationTargetException(org.apache.geode.internal.cache.execute.InternalFunctionInvocationTargetException) DistributedSystem(org.apache.geode.distributed.DistributedSystem) InternalDistributedSystem(org.apache.geode.distributed.internal.InternalDistributedSystem)
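
The retry counter above only advances because geode re-executes the function after the thrown FunctionInvocationTargetException, and it does that only for functions that declare themselves highly available. A sketch of the declarations that enable this behavior, using the FunctionAdapter base class from the same era (the id, field, and body are illustrative, not from the source):

import org.apache.geode.cache.execute.FunctionAdapter;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.FunctionInvocationTargetException;

// Sketch of a function whose failures the function service will retry.
public class RetriableFunction extends FunctionAdapter {

    private int attempts;

    @Override
    public void execute(FunctionContext context) {
        if (++attempts < 3) {
            // Throwing this asks the service to re-execute, as in the test above.
            throw new FunctionInvocationTargetException("please retry");
        }
        context.getResultSender().lastResult(attempts);
        attempts = 0;
    }

    @Override
    public String getId() {
        return "RetriableFunction";
    }

    @Override
    public boolean isHA() {
        // true marks the function as safe to re-execute on failure.
        return true;
    }

    @Override
    public boolean hasResult() {
        // An HA function must also declare that it returns a result.
        return true;
    }
}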

Example 9 with LogWriter

Use of org.apache.geode.LogWriter in project geode by apache.

In the class Bug39356DUnitTest, the method testCrashWhileCreatingABucket:

/**
   * This tests the case where the VM forcing other VMs to create a bucket crashes while creating
   * the bucket.
   */
@Test
public void testCrashWhileCreatingABucket() {
    Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    final VM vm1 = host.getVM(1);
    final VM vm2 = host.getVM(2);
    SerializableRunnable createParReg = new SerializableRunnable("Create parReg") {

        public void run() {
            DistributionMessageObserver.setInstance(new MyRegionObserver(vm0));
            Cache cache = getCache();
            AttributesFactory af = new AttributesFactory();
            PartitionAttributesFactory pf = new PartitionAttributesFactory();
            pf.setRedundantCopies(1);
            pf.setRecoveryDelay(0);
            af.setDataPolicy(DataPolicy.PARTITION);
            af.setPartitionAttributes(pf.create());
            cache.createRegion(REGION_NAME, af.create());
        }
    };
    vm1.invoke(createParReg);
    vm2.invoke(createParReg);
    SerializableRunnable createParRegAccessor = new SerializableRunnable("Create parReg accessor") {

        public void run() {
            Cache cache = getCache();
            AttributesFactory af = new AttributesFactory();
            PartitionAttributesFactory pf = new PartitionAttributesFactory();
            pf.setRedundantCopies(1);
            pf.setLocalMaxMemory(0);
            af.setDataPolicy(DataPolicy.PARTITION);
            af.setPartitionAttributes(pf.create());
            Region r = cache.createRegion(REGION_NAME, af.create());
            // trigger the creation of a bucket, which should trigger the destruction of this VM.
            try {
                r.put("ping", "pong");
                fail("Should have gotten a CancelException");
            } catch (CancelException e) {
            // this is ok, we expect our observer to close this cache.
            }
        }
    };
    vm0.invoke(createParRegAccessor);
    SerializableRunnable verifyBuckets = new SerializableRunnable("Verify buckets") {

        public void run() {
            LogWriter log = org.apache.geode.test.dunit.LogWriterUtils.getLogWriter();
            Cache cache = getCache();
            PartitionedRegion r = (PartitionedRegion) cache.getRegion(REGION_NAME);
            for (int i = 0; i < r.getAttributes().getPartitionAttributes().getTotalNumBuckets(); i++) {
                List owners = null;
                while (owners == null) {
                    try {
                        owners = r.getBucketOwnersForValidation(i);
                    } catch (ForceReattemptException e) {
                        log.info(Bug39356DUnitTest.class + " verify buckets caught a ForceReattemptException");
                        Wait.pause(1000);
                    }
                }
                if (owners.isEmpty()) {
                    log.info("skipping bucket " + i + " because it has no data");
                    continue;
                }
                assertEquals("Expecting bucket " + i + " to have two copies", 2, owners.size());
                log.info("bucket " + i + " had two copies");
            }
        }
    };
    vm1.invoke(verifyBuckets);
    vm2.invoke(verifyBuckets);
}
Also used : SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) LogWriter(org.apache.geode.LogWriter) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) CancelException(org.apache.geode.CancelException) Cache(org.apache.geode.cache.Cache) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
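
MyRegionObserver is referenced but not included in the excerpt. A hypothetical reconstruction of what such an observer looks like, based on the DistributionMessageObserver hook it is installed through (the message check and crash mechanics are assumptions, and the exact DistributionManager parameter type varies by geode version):

import org.apache.geode.distributed.internal.DistributionManager;
import org.apache.geode.distributed.internal.DistributionMessage;
import org.apache.geode.distributed.internal.DistributionMessageObserver;
import org.apache.geode.test.dunit.VM;

// Hypothetical sketch: watches the distribution layer and simulates a crash
// of the bucket-creating member when bucket management traffic appears.
public class MyRegionObserver extends DistributionMessageObserver {

    private final VM vmToCrash;

    public MyRegionObserver(VM vmToCrash) {
        this.vmToCrash = vmToCrash;
    }

    @Override
    public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
        // Assumed behavior: the real test reacts to a bucket management
        // message here and force-closes the cache in vmToCrash, which is
        // what makes the accessor's put() above see a CancelException.
    }
}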

Example 10 with LogWriter

Use of org.apache.geode.LogWriter in project geode by apache.

In the class InterestResultPolicyDUnitTest, the method verifyResult:

/**
   * Verifies the number of entries (including values) created on the test region at the end of
   * {@link Region#registerInterest} call depending on the type of {@link InterestResultPolicy}
   * registered for the region.
   *
   * @param interestPolicy - {@link InterestResultPolicy} registered for the region
   * @param totalKeysRegistered - total number of keys registered for interest (not used by this check)
   */
public static void verifyResult(Object interestPolicy, Object totalKeysRegistered) {
    Region region1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
    int entriesSize = region1.entrySet(false).size();
    int keysSize = region1.keySet().size();
    int valuesSize = region1.values().size();
    InterestResultPolicy policy = (InterestResultPolicy) interestPolicy;
    LogWriter logger = cache.getLogger();
    logger.fine("policy = " + policy + " ==> entries = " + entriesSize + " ;keys = " + keysSize + ";values = " + valuesSize);
    if (policy.isNone()) {
        // nothing should be created on client cache
        assertEquals(0, entriesSize);
        assertEquals(0, keysSize);
        assertEquals(0, valuesSize);
    } else if (policy.isKeys()) {
        // all keys should be created with values null
        assertEquals(PREPOPULATED_ENTRIES, entriesSize);
        assertEquals(PREPOPULATED_ENTRIES, keysSize);
        assertEquals(0, valuesSize);
    } else if (policy.isKeysValues()) {
        // all the keys and values should be created
        assertEquals(PREPOPULATED_ENTRIES, entriesSize);
        assertEquals(PREPOPULATED_ENTRIES, keysSize);
        assertEquals(PREPOPULATED_ENTRIES, valuesSize);
    }
}
Also used : InterestResultPolicy(org.apache.geode.cache.InterestResultPolicy) LogWriter(org.apache.geode.LogWriter) Region(org.apache.geode.cache.Region)
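
For context, the three branches above correspond to the client having called registerInterest with NONE, KEYS, or KEYS_VALUES. A minimal client-side sketch (the region must be backed by a pool with subscriptions enabled; "ALL_KEYS" is geode's token for every key in the server region):

import org.apache.geode.cache.InterestResultPolicy;
import org.apache.geode.cache.Region;

// Hypothetical client-side helper: registers interest in every key with the
// given result policy, which determines what verifyResult() will then see.
public static void registerAllKeys(Region<String, String> region, InterestResultPolicy policy) {
    region.registerInterest("ALL_KEYS", policy);
}

// e.g. registerAllKeys(clientRegion, InterestResultPolicy.KEYS_VALUES);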

Aggregations

LogWriter (org.apache.geode.LogWriter): 87
Test (org.junit.Test): 34
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 18
InternalDistributedSystem (org.apache.geode.distributed.internal.InternalDistributedSystem): 17
Host (org.apache.geode.test.dunit.Host): 17
Region (org.apache.geode.cache.Region): 15
DistributedSystem (org.apache.geode.distributed.DistributedSystem): 15
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 15
VM (org.apache.geode.test.dunit.VM): 13
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 12
Iterator (java.util.Iterator): 11
Set (java.util.Set): 11
Cache (org.apache.geode.cache.Cache): 11
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 11
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable): 10
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 9
Properties (java.util.Properties): 8
InternalLogWriter (org.apache.geode.internal.logging.InternalLogWriter): 8
IOException (java.io.IOException): 7
HashSet (java.util.HashSet): 7