
Example 31 with LogWriter

use of org.apache.geode.LogWriter in project geode by apache.

the class JCALocalTransaction method rollback.

@Override
public void rollback() throws ResourceException {
    TXStateProxy tsp = this.gfTxMgr.getTXState();
    if (tsp != null && this.tid != tsp.getTransactionId()) {
        throw new IllegalStateException("Local Transaction associated with Tid = " + this.tid + " attempting to roll back a different transaction");
    }
    LogWriter logger = this.cache.getLogger();
    if (logger.fineEnabled()) {
        logger.fine("JCALocalTransaction:invoked rollback");
    }
    try {
        this.gfTxMgr.rollback();
    } catch (IllegalStateException ise) {
        // It is possible that the GFE transaction has already been rolled back, in which case
        // the transaction manager reports that the thread has no active transaction; ignore that.
        if (!ise.getMessage().equals(
            LocalizedStrings.TXManagerImpl_THREAD_DOES_NOT_HAVE_AN_ACTIVE_TRANSACTION.toLocalizedString())) {
            throw new ResourceException(ise);
        }
    } catch (RuntimeException e) {
        throw new ResourceException(e);
    } finally {
        this.tid = null;
    }
}
Also used : TXStateProxy(org.apache.geode.internal.cache.TXStateProxy) LogWriter(org.apache.geode.LogWriter) ResourceException(javax.resource.ResourceException)

Example 32 with LogWriter

use of org.apache.geode.LogWriter in project geode by apache.

the class JCALocalTransaction method init.

private void init() {
    this.cache = (InternalCache) CacheFactory.getAnyInstance();
    LogWriter logger = this.cache.getLogger();
    if (logger.fineEnabled()) {
        logger.fine("JCAManagedConnection:init. Inside init");
    }
    this.gfTxMgr = this.cache.getTxManager();
    this.initDone = true;
}
Also used : LogWriter(org.apache.geode.LogWriter)
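
The two snippets above follow the same level-guarded pattern: fetch the cache's LogWriter once, test fineEnabled() before building the message, then log. A minimal standalone sketch of that pattern is below; the peer-cache creation and the log-level property are assumptions for illustration, not part of the snippets.

import org.apache.geode.LogWriter;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;

public class LogWriterGuardSketch {
    public static void main(String[] args) {
        // Assumed setup: a local peer cache with fine-level logging enabled.
        Cache cache = new CacheFactory().set("log-level", "fine").create();
        try {
            LogWriter logger = cache.getLogger();
            // Guard fine-level output so the message string is only built when it will be written.
            if (logger.fineEnabled()) {
                logger.fine("LogWriterGuardSketch: cache created");
            }
            logger.info("LogWriterGuardSketch: done");
        } finally {
            cache.close();
        }
    }
}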

Example 33 with LogWriter

use of org.apache.geode.LogWriter in project geode by apache.

the class ConnectionPoolDUnitTest method basicTestBridgeServerFailover.

private void basicTestBridgeServerFailover(final int cnxCount) throws CacheException {
    final String name = this.getName();
    final Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    // Create two bridge servers
    SerializableRunnable createCacheServer = new CacheSerializableRunnable("Create Cache Server") {

        public void run2() throws CacheException {
            AttributesFactory factory = getBridgeServerRegionAttributes(null, null);
            createRegion(name, factory.create());
            // pause(1000);
            try {
                startBridgeServer(0);
            } catch (Exception ex) {
                org.apache.geode.test.dunit.Assert.fail("While starting CacheServer", ex);
            }
        }
    };
    vm0.invoke(createCacheServer);
    vm1.invoke(createCacheServer);
    final int port0 = vm0.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
    final int port1 = vm1.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
    // final String host1 = getServerHostName(vm1.getHost());
    // Create one bridge client in this VM
    SerializableRunnable create = new CacheSerializableRunnable("Create region") {

        public void run2() throws CacheException {
            getLonerSystem();
            getCache();
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            factory.setConcurrencyChecksEnabled(false);
            ClientServerTestCase.configureConnectionPool(factory, host0, port0, port1, true, -1, cnxCount, null, 100);
            Region region = createRegion(name, factory.create());
            // force connections to form
            region.put("keyInit", new Integer(0));
            region.put("keyInit2", new Integer(0));
        }
    };
    vm2.invoke(create);
    // Launch an async thread that puts objects into the cache. This thread executes until
    // the test ends, which is why RegionDestroyedException and CancelException are caught
    // and ignored; any other exception fails the test (see the commented-out
    // putAI.exceptionOccurred() assertion below).
    AsyncInvocation putAI = vm2.invokeAsync(new CacheSerializableRunnable("Put objects") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                for (int i = 0; i < 100000; i++) {
                    region.put("keyAI", new Integer(i));
                    try {
                        Thread.sleep(100);
                    } catch (InterruptedException ie) {
                        fail("interrupted");
                    }
                }
            } catch (NoAvailableServersException ignore) {
            /* ignore */
            } catch (RegionDestroyedException e) {
            // will be thrown when the test ends
            /* ignore */
            } catch (CancelException e) {
            // will be thrown when the test ends
            /* ignore */
            }
        }
    });
    SerializableRunnable verify1Server = new CacheSerializableRunnable("verify1Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            PoolImpl pool = getPool(region);
            verifyServerCount(pool, 1);
        }
    };
    SerializableRunnable verify2Servers = new CacheSerializableRunnable("verify2Servers") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            PoolImpl pool = getPool(region);
            verifyServerCount(pool, 2);
        }
    };
    vm2.invoke(verify2Servers);
    SerializableRunnable stopCacheServer = new SerializableRunnable("Stop CacheServer") {

        public void run() {
            stopBridgeServer(getCache());
        }
    };
    final String expected = "java.io.IOException";
    final String addExpected = "<ExpectedException action=add>" + expected + "</ExpectedException>";
    final String removeExpected = "<ExpectedException action=remove>" + expected + "</ExpectedException>";
    vm2.invoke(new SerializableRunnable() {

        public void run() {
            LogWriter bgexecLogger = new LocalLogWriter(InternalLogWriter.ALL_LEVEL, System.out);
            bgexecLogger.info(addExpected);
        }
    });
    try {
        // make sure we removeExpected
        // Bounce the non-current server (I know that VM1 contains the non-current server
        // because ...
        vm1.invoke(stopCacheServer);
        vm2.invoke(verify1Server);
        final int restartPort = port1;
        vm1.invoke(new SerializableRunnable("Restart CacheServer") {

            public void run() {
                try {
                    Region region = getRootRegion().getSubregion(name);
                    assertNotNull(region);
                    startBridgeServer(restartPort);
                } catch (Exception e) {
                    getSystem().getLogWriter().fine(new Exception(e));
                    org.apache.geode.test.dunit.Assert.fail("Failed to start CacheServer", e);
                }
            }
        });
        // Pause long enough for the monitor to realize the server has been bounced
        // and reconnect to it.
        vm2.invoke(verify2Servers);
    } finally {
        vm2.invoke(new SerializableRunnable() {

            public void run() {
                LogWriter bgexecLogger = new LocalLogWriter(InternalLogWriter.ALL_LEVEL, System.out);
                bgexecLogger.info(removeExpected);
            }
        });
    }
    // Stop the other cache server
    vm0.invoke(stopCacheServer);
    // Run awhile
    vm2.invoke(verify1Server);
    // FIXME: this thread does not terminate
    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("FIXME: this thread does not terminate");
    // // Verify that no exception has occurred in the putter thread
    // join(putAI, 5 * 60 * 1000, getLogWriter());
    // //assertTrue("Exception occurred while invoking " + putAI, !putAI.exceptionOccurred());
    // if (putAI.exceptionOccurred()) {
    // fail("While putting entries: ", putAI.getException());
    // }
    // Close Pool
    vm2.invoke(new CacheSerializableRunnable("Close Pool") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.localDestroyRegion();
        }
    });
    // Stop the last cache server
    vm1.invoke(stopCacheServer);
}
Also used : NoAvailableServersException(org.apache.geode.cache.client.NoAvailableServersException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) Host(org.apache.geode.test.dunit.Host) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) PoolImpl(org.apache.geode.cache.client.internal.PoolImpl) LocalLogWriter(org.apache.geode.internal.logging.LocalLogWriter) NoAvailableServersException(org.apache.geode.cache.client.NoAvailableServersException) CancelException(org.apache.geode.CancelException) IOException(java.io.IOException) Endpoint(org.apache.geode.cache.client.internal.Endpoint) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) LocalLogWriter(org.apache.geode.internal.logging.LocalLogWriter) InternalLogWriter(org.apache.geode.internal.logging.InternalLogWriter) LogWriter(org.apache.geode.LogWriter) VM(org.apache.geode.test.dunit.VM) LocalRegion(org.apache.geode.internal.cache.LocalRegion) CancelException(org.apache.geode.CancelException)
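
The &lt;ExpectedException action=add&gt;/&lt;ExpectedException action=remove&gt; markers logged around the server bounce tell the dunit log scanner to tolerate the expected java.io.IOException. A minimal sketch of that bracketing, reusing the LocalLogWriter construction from the test (the work parameter is a hypothetical stand-in for the step that provokes the exception):

import org.apache.geode.LogWriter;
import org.apache.geode.internal.logging.InternalLogWriter;
import org.apache.geode.internal.logging.LocalLogWriter;

public class ExpectedExceptionMarkerSketch {
    private static final String EXPECTED = "java.io.IOException";
    private static final String ADD = "<ExpectedException action=add>" + EXPECTED + "</ExpectedException>";
    private static final String REMOVE = "<ExpectedException action=remove>" + EXPECTED + "</ExpectedException>";

    static void runExpectingIOException(Runnable work) {
        LogWriter log = new LocalLogWriter(InternalLogWriter.ALL_LEVEL, System.out);
        log.info(ADD); // mark the start of the window in which the exception is expected
        try {
            work.run(); // e.g. the cache-server bounce in the test above
        } finally {
            log.info(REMOVE); // always close the window so later occurrences still fail the run
        }
    }
}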

Example 34 with LogWriter

use of org.apache.geode.LogWriter in project geode by apache.

the class PRQueryDUnitHelper method getCacheSerializableRunnableForPRCreate.

/**
   * Creates a partitioned region using an XML file description.
   *
   * @return CacheSerializableRunnable that creates the region
   */
public CacheSerializableRunnable getCacheSerializableRunnableForPRCreate(final String regionName) {
    SerializableRunnable prIndexCreator = new CacheSerializableRunnable("PrRegionCreator") {

        @Override
        public void run2() {
            try {
                Cache cache = getCache();
                LogWriter logger = cache.getLogger();
                PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
                Map indexMap = region.getIndex();
                Set indexSet = indexMap.entrySet();
                Iterator it = indexSet.iterator();
                while (it.hasNext()) {
                    Map.Entry entry = (Map.Entry) it.next();
                    Index index = (Index) entry.getValue();
                    logger.info("The partitioned index created on this region " + " " + index);
                    logger.info("Current number of buckets indexed : " + "" + ((PartitionedIndex) index).getNumberOfIndexedBuckets());
                }
            } finally {
                GemFireCacheImpl.testCacheXml = null;
            }
        }
    };
    return (CacheSerializableRunnable) prIndexCreator;
}
Also used : PartitionedIndex(org.apache.geode.cache.query.internal.index.PartitionedIndex) Set(java.util.Set) StructSetOrResultsSet(org.apache.geode.cache.query.functional.StructSetOrResultsSet) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) LogWriter(org.apache.geode.LogWriter) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) Iterator(java.util.Iterator) Index(org.apache.geode.cache.query.Index) PartitionedIndex(org.apache.geode.cache.query.internal.index.PartitionedIndex) Map(java.util.Map) Cache(org.apache.geode.cache.Cache)

Example 35 with LogWriter

use of org.apache.geode.LogWriter in project geode by apache.

the class PRQueryDUnitHelper method getCacheSerializableRunnableForPRIndexCreate.

/**
   * This function creates an appropriate index on a PR given the name and other parameters.
   */
public CacheSerializableRunnable getCacheSerializableRunnableForPRIndexCreate(final String prRegionName, final String indexName, final String indexedExpression, final String fromClause, final String alias) {
    SerializableRunnable prIndexCreator = new CacheSerializableRunnable("PartitionedIndexCreator") {

        @Override
        public void run2() {
            try {
                Cache cache = getCache();
                QueryService qs = cache.getQueryService();
                Region region = cache.getRegion(prRegionName);
                LogWriter logger = cache.getLogger();
                if (null != fromClause) {
                    logger.info("Test Creating index with Name : [ " + indexName + " ] " + "IndexedExpression : [ " + indexedExpression + " ] Alias : [ " + alias + " ] FromClause : [ " + fromClause + " " + alias + " ] ");
                    Index parIndex = qs.createIndex(indexName, IndexType.FUNCTIONAL, indexedExpression, fromClause);
                    logger.info("Index creted on partitioned region : " + parIndex);
                } else {
                    logger.info("Test Creating index with Name : [ " + indexName + " ] " + "IndexedExpression : [ " + indexedExpression + " ] Alias : [ " + alias + " ] FromClause : [ " + region.getFullPath() + " " + alias + " ] ");
                    Index parIndex = qs.createIndex(indexName, IndexType.FUNCTIONAL, indexedExpression, region.getFullPath() + " " + alias);
                    logger.info("Index creted on partitioned region : " + parIndex);
                    logger.info("Number of buckets indexed in the partitioned region locally : " + "" + ((PartitionedIndex) parIndex).getNumberOfIndexedBuckets() + " and remote buckets indexed : " + ((PartitionedIndex) parIndex).getNumRemoteBucketsIndexed());
                }
            /*
             * assertIndexDetailsEquals("Max num of buckets in the partitioned region and the "
             *     + "buckets indexed should be equal",
             *     ((PartitionedRegion) region).getTotalNumberOfBuckets(),
             *     ((PartitionedIndex) parIndex).getNumberOfIndexedBuckets()
             *         + ((PartitionedIndex) parIndex).getNumRemoteBucketsIndexed());
             * All assertions should go in a separate function.
             */
            } catch (Exception ex) {
                Assert.fail("Creating Index in this vm failed : ", ex);
            }
        }
    };
    return (CacheSerializableRunnable) prIndexCreator;
}
Also used : PartitionedIndex(org.apache.geode.cache.query.internal.index.PartitionedIndex) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) QueryService(org.apache.geode.cache.query.QueryService) LogWriter(org.apache.geode.LogWriter) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) Index(org.apache.geode.cache.query.Index) PartitionedIndex(org.apache.geode.cache.query.internal.index.PartitionedIndex) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) EntryExistsException(org.apache.geode.cache.EntryExistsException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) CancelException(org.apache.geode.CancelException) QueryInvalidException(org.apache.geode.cache.query.QueryInvalidException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) RegionNotFoundException(org.apache.geode.cache.query.RegionNotFoundException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) FunctionException(org.apache.geode.cache.execute.FunctionException) CacheClosedException(org.apache.geode.cache.CacheClosedException) MultiIndexCreationException(org.apache.geode.cache.query.MultiIndexCreationException) TestException(util.TestException) CacheException(org.apache.geode.cache.CacheException) ReplyException(org.apache.geode.distributed.internal.ReplyException) QueryException(org.apache.geode.cache.query.QueryException) IndexNameConflictException(org.apache.geode.cache.query.IndexNameConflictException) IndexExistsException(org.apache.geode.cache.query.IndexExistsException) Cache(org.apache.geode.cache.Cache)
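
For context, a functional index created through QueryService as above is picked up transparently by queries whose from-clause matches it. A brief sketch of the query side, with the region path /portfolios and the field id chosen purely for illustration:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.IndexType;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class PrIndexQuerySketch {
    // Assumes "cache" already hosts a partitioned region named /portfolios whose
    // entries expose an "id" field; both names are hypothetical.
    static SelectResults<?> queryWithIndex(Cache cache) throws Exception {
        QueryService qs = cache.getQueryService();
        // idIndex is consulted automatically by the query engine for the matching predicate.
        Index idIndex = qs.createIndex("idIndex", IndexType.FUNCTIONAL, "p.id", "/portfolios p");
        return (SelectResults<?>) qs.newQuery("SELECT * FROM /portfolios p WHERE p.id > 10").execute();
    }
}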

Aggregations

LogWriter (org.apache.geode.LogWriter) 87
Test (org.junit.Test) 34
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest) 18
InternalDistributedSystem (org.apache.geode.distributed.internal.InternalDistributedSystem) 17
Host (org.apache.geode.test.dunit.Host) 17
Region (org.apache.geode.cache.Region) 15
DistributedSystem (org.apache.geode.distributed.DistributedSystem) 15
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable) 15
VM (org.apache.geode.test.dunit.VM) 13
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion) 12
Iterator (java.util.Iterator) 11
Set (java.util.Set) 11
Cache (org.apache.geode.cache.Cache) 11
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest) 11
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable) 10
LocalRegion (org.apache.geode.internal.cache.LocalRegion) 9
Properties (java.util.Properties) 8
InternalLogWriter (org.apache.geode.internal.logging.InternalLogWriter) 8
IOException (java.io.IOException) 7
HashSet (java.util.HashSet) 7