Use of org.apache.geode.internal.cache.TXStateProxy in project geode by apache.
The class BaseCommand, method execute.
@Override
public void execute(Message clientMessage, ServerConnection serverConnection) {
// Read the request and update the statistics
long start = DistributionStats.getStatTime();
if (EntryLogger.isEnabled() && serverConnection != null) {
EntryLogger.setSource(serverConnection.getMembershipID(), "c2s");
}
boolean shouldMasquerade = shouldMasqueradeForTx(clientMessage, serverConnection);
try {
if (shouldMasquerade) {
InternalCache cache = serverConnection.getCache();
InternalDistributedMember member = (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
TXManagerImpl txMgr = cache.getTxManager();
TXStateProxy tx = null;
try {
tx = txMgr.masqueradeAs(clientMessage, member, false);
cmdExecute(clientMessage, serverConnection, start);
tx.updateProxyServer(txMgr.getMemberId());
} finally {
txMgr.unmasquerade(tx);
}
} else {
cmdExecute(clientMessage, serverConnection, start);
}
} catch (TransactionException | CopyException | SerializationException | CacheWriterException | CacheLoaderException | GemFireSecurityException | PartitionOfflineException | MessageTooLargeException e) {
handleExceptionNoDisconnect(clientMessage, serverConnection, e);
} catch (EOFException eof) {
BaseCommand.handleEOFException(clientMessage, serverConnection, eof);
} catch (InterruptedIOException e) {
// Solaris only
BaseCommand.handleInterruptedIOException(serverConnection, e);
} catch (IOException e) {
BaseCommand.handleIOException(clientMessage, serverConnection, e);
} catch (DistributedSystemDisconnectedException e) {
BaseCommand.handleShutdownException(clientMessage, serverConnection, e);
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned now, so don't let this thread continue.
throw err;
} catch (Throwable e) {
BaseCommand.handleThrowable(clientMessage, serverConnection, e);
} finally {
EntryLogger.clearSource();
}
}
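The pattern above is the core server-side use of TXStateProxy: masquerade as the client's transaction, run the command, and unmasquerade in a finally block so the thread is always detached. Below is a minimal sketch of that pattern, not taken from Geode; it reuses only the internal calls visible above (masqueradeAs, unmasquerade), the import paths are as inferred from the snippet, and doWork() is a hypothetical placeholder for the command body.
import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
import org.apache.geode.internal.cache.InternalCache;
import org.apache.geode.internal.cache.TXManagerImpl;
import org.apache.geode.internal.cache.TXStateProxy;
import org.apache.geode.internal.cache.tier.sockets.Message;
public class ClientTxMasqueradeSketch {
  // Run some work on behalf of the transaction identified by the client's message.
  void runOnBehalfOfClientTx(InternalCache cache, Message clientMessage, InternalDistributedMember member) throws Exception {
    TXManagerImpl txMgr = cache.getTxManager();
    TXStateProxy tx = null;
    try {
      // Bind this thread to the client's transaction (same call as in execute() above).
      tx = txMgr.masqueradeAs(clientMessage, member, false);
      doWork(); // hypothetical command body
    } finally {
      // Always detach the thread from the transaction, even if the work failed.
      txMgr.unmasquerade(tx);
    }
  }
  void doWork() {
    // hypothetical placeholder
  }
}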
Use of org.apache.geode.internal.cache.TXStateProxy in project geode by apache.
The class CommitCommand, method cmdExecute.
@Override
public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
serverConnection.setAsTrue(REQUIRES_RESPONSE);
TXManagerImpl txMgr = (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
InternalDistributedMember client = (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
int uniqId = clientMessage.getTransactionId();
TXId txId = new TXId(client, uniqId);
TXCommitMessage commitMsg = null;
if (txMgr.isHostedTxRecentlyCompleted(txId)) {
commitMsg = txMgr.getRecentlyCompletedMessage(txId);
if (logger.isDebugEnabled()) {
logger.debug("TX: returning a recently committed txMessage for tx: {}", txId);
}
if (!txMgr.isExceptionToken(commitMsg)) {
writeCommitResponse(commitMsg, clientMessage, serverConnection);
// fixes bug 46529
commitMsg.setClientVersion(null);
serverConnection.setAsTrue(RESPONDED);
} else {
sendException(clientMessage, serverConnection, txMgr.getExceptionForToken(commitMsg, txId));
}
txMgr.removeHostedTXState(txId);
return;
}
// fixes bug 43350
boolean wasInProgress = txMgr.setInProgress(true);
final TXStateProxy txProxy = txMgr.getTXState();
Assert.assertTrue(txProxy != null);
if (logger.isDebugEnabled()) {
logger.debug("TX: committing client tx: {}", txId);
}
try {
txId = txProxy.getTxId();
txProxy.setCommitOnBehalfOfRemoteStub(true);
txMgr.commit();
commitMsg = txProxy.getCommitMessage();
writeCommitResponse(commitMsg, clientMessage, serverConnection);
serverConnection.setAsTrue(RESPONDED);
} catch (Exception e) {
sendException(clientMessage, serverConnection, e);
} finally {
if (txId != null) {
txMgr.removeHostedTXState(txId);
}
if (!wasInProgress) {
txMgr.setInProgress(false);
}
if (commitMsg != null) {
// fixes bug 46529
commitMsg.setClientVersion(null);
}
}
}
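Stripped of the messaging and recently-completed bookkeeping, the commit path above reduces to a small TXStateProxy sequence: bind the hosted transaction to this thread, mark the proxy as committing on behalf of the client stub, commit through the manager, and read the resulting TXCommitMessage from the proxy. The following is a hedged sketch of just that sequence, reusing only the calls visible in cmdExecute above; it is not the actual CommitCommand code and it omits the exception-token and response-writing details.
import org.apache.geode.internal.cache.TXCommitMessage;
import org.apache.geode.internal.cache.TXId;
import org.apache.geode.internal.cache.TXManagerImpl;
import org.apache.geode.internal.cache.TXStateProxy;
public class CommitSketch {
  // Commit the transaction currently hosted for a client and return its commit message.
  TXCommitMessage commitHostedTx(TXManagerImpl txMgr) {
    boolean wasInProgress = txMgr.setInProgress(true); // make sure the tx is considered in progress on this thread
    TXStateProxy txProxy = txMgr.getTXState();
    TXId txId = txProxy.getTxId();
    try {
      txProxy.setCommitOnBehalfOfRemoteStub(true); // this server commits for the remote client stub
      txMgr.commit();
      return txProxy.getCommitMessage();
    } finally {
      txMgr.removeHostedTXState(txId); // drop the hosted state once the outcome is known
      if (!wasInProgress) {
        txMgr.setInProgress(false);
      }
    }
  }
}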
Use of org.apache.geode.internal.cache.TXStateProxy in project geode by apache.
The class MultiVMRegionTestCase, method testTXUpdateLoadNoConflict.
/**
* Tests that the push of a loaded value does not cause a conflict on the side receiving the
* update
*/
@Ignore("TODO: this test always hits early out")
@Test
public void testTXUpdateLoadNoConflict() throws Exception {
/*
* this no longer holds true - we have load conflicts now
*
*/
if (true) {
return;
}
assumeTrue(supportsTransactions());
assumeFalse(getRegionAttributes().getScope().isGlobal());
assumeFalse(getRegionAttributes().getDataPolicy().withPersistence());
assertTrue(getRegionAttributes().getScope().isDistributed());
CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
final String rgnName = getUniqueName();
SerializableRunnable create = new SerializableRunnable("testTXUpdateLoadNoConflict: Create Region & Load value") {
@Override
public void run() {
CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
MyTransactionListener tl = new MyTransactionListener();
txMgr2.addListener(tl);
try {
Region rgn = createRegion(rgnName);
AttributesMutator mutator = rgn.getAttributesMutator();
mutator.setCacheLoader(new CacheLoader() {
int count = 0;
@Override
public Object load(LoaderHelper helper) throws CacheLoaderException {
count++;
return "LV " + count;
}
@Override
public void close() {
}
});
Object value = rgn.get("key");
assertEquals("LV 1", value);
getSystem().getLogWriter().info("testTXUpdateLoadNoConflict: loaded Key");
flushIfNecessary(rgn);
} catch (CacheException e) {
fail("While creating region", e);
}
}
};
VM vm0 = Host.getHost(0).getVM(0);
try {
MyTransactionListener tl = new MyTransactionListener();
txMgr.addListener(tl);
AttributesFactory rgnAtts = new AttributesFactory(getRegionAttributes());
rgnAtts.setDataPolicy(DataPolicy.REPLICATE);
Region rgn = createRegion(rgnName, rgnAtts.create());
txMgr.begin();
TransactionId myTXId = txMgr.getTransactionId();
rgn.create("key", "txValue");
vm0.invoke(create);
{
TXStateProxy tx = ((TXManagerImpl) txMgr).internalSuspend();
assertTrue(rgn.containsKey("key"));
assertEquals("LV 1", rgn.getEntry("key").getValue());
((TXManagerImpl) txMgr).internalResume(tx);
}
// make sure transactional view is still correct
assertEquals("txValue", rgn.getEntry("key").getValue());
txMgr.commit();
getSystem().getLogWriter().info("testTXUpdateLoadNoConflict: did commit");
assertEquals("txValue", rgn.getEntry("key").getValue());
{
Collection events = tl.lastEvent.getCreateEvents();
assertEquals(1, events.size());
EntryEvent ev = (EntryEvent) events.iterator().next();
assertEquals(myTXId, ev.getTransactionId());
assertTrue(ev.getRegion() == rgn);
assertEquals("key", ev.getKey());
assertEquals("txValue", ev.getNewValue());
assertEquals(null, ev.getOldValue());
assertTrue(!ev.getOperation().isLocalLoad());
assertTrue(!ev.getOperation().isNetLoad());
assertTrue(!ev.getOperation().isLoad());
assertTrue(!ev.getOperation().isNetSearch());
assertTrue(!ev.getOperation().isExpiration());
assertEquals(null, ev.getCallbackArgument());
assertEquals(true, ev.isCallbackArgumentAvailable());
assertTrue(!ev.isOriginRemote());
assertTrue(ev.getOperation().isDistributed());
}
// Now recreate the region in the controller with DataPolicy.NORMAL
// so the test can do local destroys.
rgn.localDestroyRegion();
rgnAtts.setDataPolicy(DataPolicy.NORMAL);
rgn = createRegion(rgnName, rgnAtts.create());
// now see if net loader is working
Object v2 = rgn.get("key2");
assertEquals("LV 2", v2);
// now confirm that netload does not cause a conflict
txMgr.begin();
myTXId = txMgr.getTransactionId();
rgn.create("key3", "txValue3");
{
TXStateProxy tx = ((TXManagerImpl) txMgr).internalSuspend();
// do a get outside of the transaction to force a net load
Object v3 = rgn.get("key3");
assertEquals("LV 3", v3);
((TXManagerImpl) txMgr).internalResume(tx);
}
// make sure transactional view is still correct
assertEquals("txValue3", rgn.getEntry("key3").getValue());
txMgr.commit();
getSystem().getLogWriter().info("testTXUpdateLoadNoConflict: did commit");
assertEquals("txValue3", rgn.getEntry("key3").getValue());
{
Collection events = tl.lastEvent.getCreateEvents();
assertEquals(1, events.size());
EntryEvent ev = (EntryEvent) events.iterator().next();
assertEquals(myTXId, ev.getTransactionId());
assertTrue(ev.getRegion() == rgn);
assertEquals("key3", ev.getKey());
assertEquals("txValue3", ev.getNewValue());
assertEquals(null, ev.getOldValue());
assertTrue(!ev.getOperation().isLocalLoad());
assertTrue(!ev.getOperation().isNetLoad());
assertTrue(!ev.getOperation().isLoad());
assertTrue(!ev.getOperation().isNetSearch());
assertTrue(!ev.getOperation().isExpiration());
assertEquals(null, ev.getCallbackArgument());
assertEquals(true, ev.isCallbackArgumentAvailable());
assertTrue(!ev.isOriginRemote());
assertTrue(ev.getOperation().isDistributed());
}
// now see if tx net loader is working
// now confirm that netload does not cause a conflict
txMgr.begin();
myTXId = txMgr.getTransactionId();
Object v4 = rgn.get("key4");
assertEquals("LV 4", v4);
assertEquals("LV 4", rgn.get("key4"));
assertEquals("LV 4", rgn.getEntry("key4").getValue());
txMgr.rollback();
// confirm that netLoad is transactional
assertEquals("LV 5", rgn.get("key4"));
assertEquals("LV 5", rgn.getEntry("key4").getValue());
// make sure non-tx netsearch works
assertEquals("txValue", rgn.get("key"));
assertEquals("txValue", rgn.getEntry("key").getValue());
// make sure net-search result does not conflict with commit
rgn.localInvalidate("key");
txMgr.begin();
myTXId = txMgr.getTransactionId();
rgn.put("key", "new txValue");
{
TXStateProxy tx = ((TXManagerImpl) txMgr).internalSuspend();
// do a get outside of the transaction to force a netsearch
// does a netsearch
assertEquals("txValue", rgn.get("key"));
assertEquals("txValue", rgn.getEntry("key").getValue());
((TXManagerImpl) txMgr).internalResume(tx);
}
// make sure transactional view is still correct
assertEquals("new txValue", rgn.getEntry("key").getValue());
txMgr.commit();
// give the other side a chance to process the commit
flushIfNecessary(rgn);
getSystem().getLogWriter().info("testTXUpdateLoadNoConflict: did commit");
assertEquals("new txValue", rgn.getEntry("key").getValue());
{
Collection events = tl.lastEvent.getPutEvents();
assertEquals(1, events.size());
EntryEvent ev = (EntryEvent) events.iterator().next();
assertEquals(myTXId, ev.getTransactionId());
assertTrue(ev.getRegion() == rgn);
assertEquals("key", ev.getKey());
assertEquals("new txValue", ev.getNewValue());
assertEquals(null, ev.getOldValue());
assertTrue(!ev.getOperation().isLocalLoad());
assertTrue(!ev.getOperation().isNetLoad());
assertTrue(!ev.getOperation().isLoad());
assertTrue(!ev.getOperation().isNetSearch());
assertTrue(!ev.getOperation().isExpiration());
assertEquals(null, ev.getCallbackArgument());
assertEquals(true, ev.isCallbackArgumentAvailable());
assertTrue(!ev.isOriginRemote());
assertTrue(ev.getOperation().isDistributed());
}
// make sure tx local invalidate allows netsearch
Object localCmtValue = rgn.getEntry("key").getValue();
txMgr.begin();
assertSame(localCmtValue, rgn.getEntry("key").getValue());
rgn.localInvalidate("key");
assertNull(rgn.getEntry("key").getValue());
// now make sure a get will do a netsearch and find the value
// in the other vm instead of the one in local cmt state
Object txValue = rgn.get("key");
assertNotSame(localCmtValue, txValue);
assertSame(txValue, rgn.get("key"));
assertNotSame(localCmtValue, rgn.getEntry("key").getValue());
// make sure we did a search and not a load
assertEquals(localCmtValue, rgn.getEntry("key").getValue());
// now make sure that if we do a tx distributed invalidate
// that we will do a load and not a search
rgn.invalidate("key");
assertNull(rgn.getEntry("key").getValue());
txValue = rgn.get("key");
assertEquals("LV 6", txValue);
assertSame(txValue, rgn.get("key"));
assertEquals("LV 6", rgn.getEntry("key").getValue());
// now make sure after rollback that local cmt state has not changed
txMgr.rollback();
assertSame(localCmtValue, rgn.getEntry("key").getValue());
} catch (Exception e) {
CacheFactory.getInstance(getSystem()).close();
getSystem().getLogWriter().fine("testTXUpdateLoadNoConflict: Caused exception in createRegion");
throw e;
}
}
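The recurring TXStateProxy idiom in this test is internalSuspend()/internalResume(tx): detach the thread's transactional view, observe the committed state, then reattach. Below is a minimal sketch of that idiom, assuming only the TXManagerImpl methods already used in the test above; it is an illustration, not test code from Geode.
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;
import org.apache.geode.internal.cache.TXManagerImpl;
import org.apache.geode.internal.cache.TXStateProxy;
public class SuspendResumeSketch {
  // Read the committed (non-transactional) value of a key while a transaction is open on this thread.
  Object readCommittedValue(CacheTransactionManager txMgr, Region<Object, Object> rgn, Object key) {
    TXStateProxy tx = ((TXManagerImpl) txMgr).internalSuspend(); // detach the current transaction
    try {
      return rgn.get(key); // this get sees committed state, not the suspended transaction's view
    } finally {
      ((TXManagerImpl) txMgr).internalResume(tx); // reattach the transaction
    }
  }
}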
Use of org.apache.geode.internal.cache.TXStateProxy in project geode by apache.
The class ExecuteFunction66, method executeFunctionaLocally.
private void executeFunctionaLocally(final Function fn, final FunctionContext cx, final ServerToClientFunctionResultSender65 sender, DM dm, final FunctionStats stats) throws IOException {
long startExecution = stats.startTime();
stats.startFunctionExecution(fn.hasResult());
if (fn.hasResult()) {
fn.execute(cx);
if (!((ServerToClientFunctionResultSender65) sender).isLastResultReceived() && fn.hasResult()) {
throw new FunctionException(LocalizedStrings.ExecuteFunction_THE_FUNCTION_0_DID_NOT_SENT_LAST_RESULT.toString(fn.getId()));
}
} else {
/*
* If dm is null it means the cache is also null; a transactional function cannot be
* executed without a cache.
*/
final TXStateProxy txState = TXManagerImpl.getCurrentTXState();
Runnable functionExecution = new Runnable() {
public void run() {
InternalCache cache = null;
try {
if (txState != null) {
cache = GemFireCacheImpl.getExisting("executing function");
cache.getTxManager().masqueradeAs(txState);
if (cache.getLoggerI18n().warningEnabled() && !ASYNC_TX_WARNING_ISSUED) {
ASYNC_TX_WARNING_ISSUED = true;
cache.getLoggerI18n().warning(LocalizedStrings.ExecuteFunction66_TRANSACTIONAL_FUNCTION_WITHOUT_RESULT);
}
}
fn.execute(cx);
} catch (InternalFunctionInvocationTargetException internalfunctionException) {
// Fix for #44709: the user should not be aware of
// InternalFunctionInvocationTargetException. No instance of
// InternalFunctionInvocationTargetException gives the user enough
// information to take corrective action, hence it is logged at debug level.
// 1> In case of HA, a FunctionInvocationTargetException is thrown; since
// it is HA, the function will be re-executed on the right node.
// 2> In case of HA, a member departed.
stats.endFunctionExecutionWithException(fn.hasResult());
if (logger.isDebugEnabled()) {
logger.debug(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, new Object[] { fn }), internalfunctionException);
}
} catch (FunctionException functionException) {
stats.endFunctionExecutionWithException(fn.hasResult());
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, fn), functionException);
} catch (Exception exception) {
stats.endFunctionExecutionWithException(fn.hasResult());
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, fn), exception);
} finally {
if (txState != null && cache != null) {
cache.getTxManager().unmasquerade(txState);
}
}
}
};
if (dm == null) {
/**
* Executing the function in its own thread pool as the FunctionExecution thread pool of
* DistributionManager is not yet available.
*/
execService.execute(functionExecution);
} else {
final DistributionManager newDM = (DistributionManager) dm;
newDM.getFunctionExcecutor().execute(functionExecution);
}
}
stats.endFunctionExecution(startExecution, fn.hasResult());
}
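Here the TXStateProxy is captured on the calling thread with TXManagerImpl.getCurrentTXState() and re-bound inside the Runnable via masqueradeAs(txState)/unmasquerade(txState), so a function with no result still executes inside the client's transaction even though it runs asynchronously on another thread. Below is a condensed, hedged sketch of that hand-off, using only the calls visible above; error handling is reduced to a single catch, and the cache lookup string is copied from the snippet.
import org.apache.geode.internal.cache.GemFireCacheImpl;
import org.apache.geode.internal.cache.InternalCache;
import org.apache.geode.internal.cache.TXManagerImpl;
import org.apache.geode.internal.cache.TXStateProxy;
public class TxHandoffSketch {
  // Wrap a task so that it runs under the transaction bound to the calling thread, if any.
  Runnable wrapInCallersTx(final Runnable body) {
    final TXStateProxy txState = TXManagerImpl.getCurrentTXState(); // null when no transaction is active
    return new Runnable() {
      public void run() {
        InternalCache cache = null;
        try {
          if (txState != null) {
            cache = GemFireCacheImpl.getExisting("executing function"); // as in the snippet above
            cache.getTxManager().masqueradeAs(txState); // bind this worker thread to the caller's tx
          }
          body.run();
        } catch (Exception e) {
          // Sketch only: the real code records function stats and logs the failure here.
          throw new RuntimeException(e);
        } finally {
          if (txState != null && cache != null) {
            cache.getTxManager().unmasquerade(txState); // always detach before the thread is reused
          }
        }
      }
    };
  }
}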
Use of org.apache.geode.internal.cache.TXStateProxy in project geode by apache.
The class IndexManager, method createIndex.
// @todo need more specific list of exceptions
/**
* Create an index that can be used when executing queries.
*
* @param indexName the name of this index, used for statistics collection
* @param indexType the type of index
* @param origIndexedExpression the expression to index on, a function dependent on region entries
* individually.
* @param origFromClause expression that evaluates to the collection(s) that will be queried over,
* must contain one and only one region path.
* @return the newly created Index
*/
public Index createIndex(String indexName, IndexType indexType, String origIndexedExpression, String origFromClause, String imports, ExecutionContext externalContext, PartitionedIndex prIndex, boolean loadEntries) throws IndexNameConflictException, IndexExistsException, IndexInvalidException {
if (QueryMonitor.isLowMemory()) {
throw new IndexInvalidException(LocalizedStrings.IndexCreationMsg_CANCELED_DUE_TO_LOW_MEMORY.toLocalizedString());
}
boolean oldReadSerialized = DefaultQuery.getPdxReadSerialized();
DefaultQuery.setPdxReadSerialized(this.region.getCache(), true);
TXStateProxy tx = null;
if (!((InternalCache) this.region.getCache()).isClient()) {
tx = ((TXManagerImpl) this.region.getCache().getCacheTransactionManager()).internalSuspend();
}
try {
// for now this is the only option
String projectionAttributes = "*";
if (getIndex(indexName) != null) {
throw new IndexNameConflictException(LocalizedStrings.IndexManager_INDEX_NAMED_0_ALREADY_EXISTS.toLocalizedString(indexName));
}
IndexCreationHelper helper = null;
boolean isCompactOrHash = false;
// A hash index on an overflow region cannot recalculate the index key for an entry
// for comparisons during query, so fall back to a functional index.
if (indexType == IndexType.HASH && isOverFlowRegion()) {
indexType = IndexType.FUNCTIONAL;
}
if (indexType != IndexType.PRIMARY_KEY) {
helper = new FunctionalIndexCreationHelper(origFromClause, origIndexedExpression, projectionAttributes, imports, (InternalCache) region.getCache(), externalContext, this);
// Asif: For now, support Map index as non-compact; expand later.
// The limitation for compact range index also apply to hash index for now
isCompactOrHash = shouldCreateCompactIndex((FunctionalIndexCreationHelper) helper);
} else if (indexType == IndexType.PRIMARY_KEY) {
helper = new PrimaryKeyIndexCreationHelper(origFromClause, origIndexedExpression, projectionAttributes, (InternalCache) region.getCache(), externalContext, this);
} else {
throw new AssertionError("Don't know how to set helper for " + indexType);
}
if (!isCompactOrHash && indexType != IndexType.PRIMARY_KEY) {
if (indexType == IndexType.HASH) {
if (!isIndexMaintenanceTypeSynchronous()) {
throw new UnsupportedOperationException(LocalizedStrings.DefaultQueryService_HASH_INDEX_CREATION_IS_NOT_SUPPORTED_FOR_ASYNC_MAINTENANCE.toLocalizedString());
}
throw new UnsupportedOperationException(LocalizedStrings.DefaultQueryService_HASH_INDEX_CREATION_IS_NOT_SUPPORTED_FOR_MULTIPLE_ITERATORS.toLocalizedString());
}
// Overflow is not supported with range index.
if (isOverFlowRegion()) {
throw new UnsupportedOperationException(LocalizedStrings.DefaultQueryService_INDEX_CREATION_IS_NOT_SUPPORTED_FOR_REGIONS_WHICH_OVERFLOW_TO_DISK_THE_REGION_INVOLVED_IS_0.toLocalizedString(region.getFullPath()));
}
// OffHeap is not supported with range index.
if (isOffHeap()) {
if (!isIndexMaintenanceTypeSynchronous()) {
throw new UnsupportedOperationException(LocalizedStrings.DefaultQueryService_OFF_HEAP_INDEX_CREATION_IS_NOT_SUPPORTED_FOR_ASYNC_MAINTENANCE_THE_REGION_IS_0.toLocalizedString(region.getFullPath()));
}
throw new UnsupportedOperationException(LocalizedStrings.DefaultQueryService_OFF_HEAP_INDEX_CREATION_IS_NOT_SUPPORTED_FOR_MULTIPLE_ITERATORS_THE_REGION_IS_0.toLocalizedString(region.getFullPath()));
}
}
if (logger.isDebugEnabled()) {
logger.debug("Started creating index with indexName: {} On region: {}", indexName, region.getFullPath());
}
if (IndexManager.testHook != null) {
if (logger.isDebugEnabled()) {
logger.debug("IndexManager TestHook is set.");
}
if (((LocalRegion) this.region).isInitialized()) {
testHook.hook(1);
} else {
testHook.hook(0);
}
}
IndexTask indexTask = new IndexTask(indexName, indexType, origFromClause, origIndexedExpression, helper, isCompactOrHash, prIndex, loadEntries);
FutureTask<Index> indexFutureTask = new FutureTask<Index>(indexTask);
Object oldIndex = this.indexes.putIfAbsent(indexTask, indexFutureTask);
Index index = null;
boolean interrupted = false;
try {
if (oldIndex == null) {
// Initialize index.
indexFutureTask.run();
// Set the index.
index = (Index) indexFutureTask.get();
} else {
// Check if index creation is complete.
if (!(oldIndex instanceof Index)) {
// Some other thread is creating the same index.
// Wait for index to be initialized from other thread.
((Future) oldIndex).get();
}
// The index was already created by another thread; throw the appropriate exception from this thread.
if (getIndex(indexName) != null) {
throw new IndexNameConflictException(LocalizedStrings.IndexManager_INDEX_NAMED_0_ALREADY_EXISTS.toLocalizedString(indexName));
} else {
throw new IndexExistsException(LocalizedStrings.IndexManager_SIMILAR_INDEX_EXISTS.toLocalizedString());
}
}
} catch (InterruptedException ignored) {
interrupted = true;
} catch (ExecutionException ee) {
Throwable c = ee.getCause();
if (c instanceof IndexNameConflictException) {
throw (IndexNameConflictException) c;
} else if (c instanceof IndexExistsException) {
throw (IndexExistsException) c;
} else if (c instanceof IMQException) {
throw new IndexInvalidException(c.getMessage());
}
throw new IndexInvalidException(ee);
} finally {
// If the index was not created successfully, remove the incomplete IndexTask from the map.
if (oldIndex == null && index == null) {
Object ind = this.indexes.get(indexTask);
if (ind != null && !(ind instanceof Index)) {
this.indexes.remove(indexTask);
}
}
if (interrupted) {
Thread.currentThread().interrupt();
}
}
assert (index != null);
if (logger.isDebugEnabled()) {
logger.debug("Completed creating index with indexName: {} On region: {}", indexName, region.getFullPath());
}
return index;
} finally {
DefaultQuery.setPdxReadSerialized(this.region.getCache(), oldReadSerialized);
if (tx != null) {
((TXManagerImpl) this.region.getCache().getCacheTransactionManager()).internalResume(tx);
}
}
}
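On the transaction side, the relevant pattern in createIndex is defensive: on a server, suspend any transaction bound to the calling thread before doing the index work, and resume it in the outer finally block so the caller's transaction is unaffected. Below is a reduced sketch of that pattern, assuming the same internalSuspend/internalResume API used above; runOutsideCurrentTx and the Callable work are illustrative names, not Geode code.
import org.apache.geode.cache.Cache;
import org.apache.geode.internal.cache.TXManagerImpl;
import org.apache.geode.internal.cache.TXStateProxy;
import java.util.concurrent.Callable;
public class SuspendAroundWorkSketch {
  // Run a unit of work outside whatever transaction is bound to the current thread.
  <T> T runOutsideCurrentTx(Cache cache, Callable<T> work) throws Exception {
    TXManagerImpl txMgr = (TXManagerImpl) cache.getCacheTransactionManager();
    TXStateProxy tx = txMgr.internalSuspend(); // null if no transaction is in progress
    try {
      return work.call(); // e.g. the index-creation body in the snippet above
    } finally {
      if (tx != null) {
        txMgr.internalResume(tx); // restore the caller's transaction
      }
    }
  }
}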