Use of org.apache.geode.distributed.DistributedMember in project geode by apache.
The class SystemAdmin, method compactAllDiskStores.
public static void compactAllDiskStores(List args) throws AdminException {
  InternalDistributedSystem ads = getAdminCnx();
  Map<DistributedMember, Set<PersistentID>> status =
      AdminDistributedSystemImpl.compactAllDiskStores(ads.getDistributionManager());
  System.out.println("Compaction complete.");
  System.out.println("The following disk stores compacted some files:");
  for (Set<PersistentID> memberStores : status.values()) {
    for (PersistentID store : memberStores) {
      System.out.println("\t" + store);
    }
  }
}
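Note on the snippet above: the status map is keyed by DistributedMember, so the report can also name the member that hosts each set of compacted disk stores. A minimal sketch of that variation (the loop below is illustrative, not part of SystemAdmin):

// Illustrative variation: iterate entrySet() so each PersistentID is printed together
// with the DistributedMember that reported it.
for (Map.Entry<DistributedMember, Set<PersistentID>> entry : status.entrySet()) {
  DistributedMember member = entry.getKey();
  for (PersistentID store : entry.getValue()) {
    System.out.println(member + "\t" + store);
  }
}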
Use of org.apache.geode.distributed.DistributedMember in project geode by apache.
The class DistTXStateProxyImplOnCoordinator, method doCommit.
/*
 * [DISTTX] TODO - Handle result TXMessage
 */
private boolean doCommit() {
  boolean finalResult = true;
  final GemFireCacheImpl cache = GemFireCacheImpl.getExisting("Applying Dist TX Commit");
  final DM dm = cache.getDistributionManager();
  // Create Tx Participants
  Set<DistributedMember> txRemoteParticpants = getTxRemoteParticpants(dm);
  // create processor and commit message
  DistTXCommitMessage.DistTxCommitReplyProcessor processor =
      new DistTXCommitMessage.DistTxCommitReplyProcessor(this.getTxId(), dm, txRemoteParticpants,
          target2realDeals);
  // TODO [DISTTX} whats ack threshold?
  processor.enableSevereAlertProcessing();
  final DistTXCommitMessage commitMsg =
      new DistTXCommitMessage(this.getTxId(), this.onBehalfOfClientMember, processor);
  // send commit message to remote nodes
  ArrayList<ArrayList<DistTxThinEntryState>> entryEventList = new ArrayList<>();
  TreeSet<String> sortedRegionName = new TreeSet<>();
  for (DistributedMember remoteNode : txRemoteParticpants) {
    DistTXCoordinatorInterface remoteTXStateStub = target2realDeals.get(remoteNode);
    if (remoteTXStateStub.isTxState()) {
      throw new UnsupportedOperationInTransactionException(
          LocalizedStrings.DISTTX_TX_EXPECTED.toLocalizedString("DistPeerTXStateStub",
              remoteTXStateStub.getClass().getSimpleName()));
    }
    try {
      populateEntryEventList(remoteNode, entryEventList, sortedRegionName);
      commitMsg.setEntryStateList(entryEventList);
      remoteTXStateStub.setCommitMessage(commitMsg, dm);
      remoteTXStateStub.commit();
    } finally {
      remoteTXStateStub.setCommitMessage(null, null);
      remoteTXStateStub.finalCleanup();
    }
    if (logger.isDebugEnabled()) {
      logger.debug("DistTXStateProxyImplOnCoordinator.doCommit Sent Message target = " + remoteNode
          + " ,sortedRegions=" + sortedRegionName + " ,entryEventList="
          + printEntryEventList(entryEventList) + " ,txEntryEventMap="
          + printEntryEventMap(this.txEntryEventMap));
    }
  }
  // Do commit on local node
  DistTXCoordinatorInterface localTXState = target2realDeals.get(dm.getId());
  if (localTXState != null) {
    if (!localTXState.isTxState()) {
      throw new UnsupportedOperationInTransactionException(
          LocalizedStrings.DISTTX_TX_EXPECTED.toLocalizedString("DistTXStateOnCoordinator",
              localTXState.getClass().getSimpleName()));
    }
    populateEntryEventList(dm.getId(), entryEventList, sortedRegionName);
    ((DistTXStateOnCoordinator) localTXState).setDistTxEntryStates(entryEventList);
    localTXState.commit();
    TXCommitMessage localResultMsg = localTXState.getCommitMessage();
    if (logger.isDebugEnabled()) {
      logger.debug("DistTXStateProxyImplOnCoordinator.doCommit local = " + dm.getId()
          + " ,sortedRegions=" + sortedRegionName + " ,entryEventList="
          + printEntryEventList(entryEventList) + " ,txEntryEventMap="
          + printEntryEventMap(this.txEntryEventMap) + " ,result= " + (localResultMsg != null)
          + " ,finalResult-old= " + finalResult);
    }
    finalResult = finalResult && (localResultMsg != null);
  }
  /*
   * [DISTTX] TODO Any test hooks
   */
  // if (internalAfterIndividualSend != null) {
  // internalAfterIndividualSend.run();
  // }
  /*
   * [DISTTX] TODO see how to handle exception
   */
  /*
   * [DISTTX] TODO Any test hooks
   */
  // if (internalAfterIndividualCommitProcess != null) {
  // // Testing callback
  // internalAfterIndividualCommitProcess.run();
  // }
  {
    // Wait for results
    dm.getCancelCriterion().checkCancelInProgress(null);
    processor.waitForPrecommitCompletion();
    // [DISTTX} TODO Handle stats
    dm.getStats().incCommitWaits();
    Map<DistributedMember, TXCommitMessage> remoteResults = processor.getCommitResponseMap();
    for (Entry<DistributedMember, TXCommitMessage> e : remoteResults.entrySet()) {
      DistributedMember target = e.getKey();
      TXCommitMessage remoteResultMsg = e.getValue();
      if (logger.isDebugEnabled()) {
        // TODO - make this trace level
        logger.debug("DistTXStateProxyImplOnCoordinator.doCommit got results from target = "
            + target + " ,result= " + (remoteResultMsg != null) + " ,finalResult-old= "
            + finalResult);
      }
      finalResult = finalResult && remoteResultMsg != null;
    }
  }
  if (logger.isDebugEnabled()) {
    logger.debug("DistTXStateProxyImplOnCoordinator.doCommit finalResult= " + finalResult);
  }
  return finalResult;
}
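doCommit is internal coordinator plumbing; application code reaches it through the public transaction API. A rough calling-side sketch, assuming the distributed-transaction mode is switched on via CacheTransactionManager.setDistributed(true); the class name, region name, and values below are illustrative:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;

public class DistTxUsageSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    // "exampleRegion" is an illustrative region name, not one used by the coordinator code.
    Region<String, String> region = cache.getRegion("exampleRegion");
    CacheTransactionManager txManager = cache.getCacheTransactionManager();
    // Assumption: distributed transactions are enabled, so commit() is driven through the
    // DistTXStateProxyImplOnCoordinator path shown above.
    txManager.setDistributed(true);
    txManager.begin();
    try {
      region.put("key1", "value1");
      txManager.commit();
    } catch (RuntimeException e) {
      if (txManager.exists()) {
        txManager.rollback();
      }
      throw e;
    }
  }
}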
Use of org.apache.geode.distributed.DistributedMember in project geode by apache.
The class DistTXStateProxyImplOnCoordinator, method getOwnerForKey.
/*
 * Do not return null
 */
public DistributedMember getOwnerForKey(LocalRegion r, KeyInfo key) {
  DistributedMember m = r.getOwnerForKey(key);
  if (m == null) {
    GemFireCacheImpl cache = GemFireCacheImpl.getExisting("getOwnerForKey");
    m = cache.getDistributedSystem().getDistributedMember();
  }
  return m;
}
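The fallback above guarantees a non-null DistributedMember: either the owner resolved by the region or, failing that, the local member. A caller can therefore compare the result against the local member without a null check; the helper below is hypothetical and not part of the class:

// Hypothetical helper: decide whether the key's owner is this node.
private boolean isLocalOwner(GemFireCacheImpl cache, LocalRegion r, KeyInfo key) {
  DistributedMember owner = getOwnerForKey(r, key); // never null, per the method above
  DistributedMember self = cache.getDistributedSystem().getDistributedMember();
  return self.equals(owner);
}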
Use of org.apache.geode.distributed.DistributedMember in project geode by apache.
The class DistTXStateProxyImplOnCoordinator, method doPrecommit.
private boolean doPrecommit() {
  boolean finalResult = true;
  final GemFireCacheImpl cache = GemFireCacheImpl.getExisting("Applying Dist TX Precommit");
  final DM dm = cache.getDistributionManager();
  Set<DistributedMember> txRemoteParticpants = getTxRemoteParticpants(dm);
  // create processor and precommit message
  DistTXPrecommitMessage.DistTxPrecommitReplyProcessor processor =
      new DistTXPrecommitMessage.DistTxPrecommitReplyProcessor(this.getTxId(), dm,
          txRemoteParticpants, target2realDeals);
  // TODO [DISTTX} whats ack threshold?
  processor.enableSevereAlertProcessing();
  final DistTXPrecommitMessage precommitMsg =
      new DistTXPrecommitMessage(this.getTxId(), this.onBehalfOfClientMember, processor);
  // send precommit message to remote nodes
  for (DistributedMember remoteNode : txRemoteParticpants) {
    DistTXCoordinatorInterface remoteTXStateStub = target2realDeals.get(remoteNode);
    if (remoteTXStateStub.isTxState()) {
      throw new UnsupportedOperationInTransactionException(
          LocalizedStrings.DISTTX_TX_EXPECTED.toLocalizedString("DistPeerTXStateStub",
              remoteTXStateStub.getClass().getSimpleName()));
    }
    try {
      remoteTXStateStub.setPrecommitMessage(precommitMsg, dm);
      remoteTXStateStub.precommit();
    } finally {
      remoteTXStateStub.setPrecommitMessage(null, null);
    }
    if (logger.isDebugEnabled()) {
      logger.debug(
          "DistTXStateProxyImplOnCoordinator.doPrecommit Sent Message to target = " + remoteNode);
    }
  }
  // Do precommit on local node
  TreeSet<String> sortedRegionName = new TreeSet<>();
  DistTXCoordinatorInterface localTXState = target2realDeals.get(dm.getId());
  if (localTXState != null) {
    if (!localTXState.isTxState()) {
      throw new UnsupportedOperationInTransactionException(
          LocalizedStrings.DISTTX_TX_EXPECTED.toLocalizedString("DistTXStateOnCoordinator",
              localTXState.getClass().getSimpleName()));
    }
    localTXState.precommit();
    boolean localResult = localTXState.getPreCommitResponse();
    TreeMap<String, ArrayList<DistTxThinEntryState>> entryStateSortedMap =
        new TreeMap<String, ArrayList<DistTxThinEntryState>>();
    ArrayList<ArrayList<DistTxThinEntryState>> entryEventList = null;
    if (localResult) {
      localResult = ((DistTXStateOnCoordinator) localTXState)
          .populateDistTxEntryStateList(entryStateSortedMap);
      if (localResult) {
        entryEventList =
            new ArrayList<ArrayList<DistTxThinEntryState>>(entryStateSortedMap.values());
        populateEntryEventMap(dm.getId(), entryEventList, sortedRegionName);
      }
    }
    if (logger.isDebugEnabled()) {
      logger.debug("DistTXStateProxyImplOnCoordinator.doPrecommit local = " + dm.getId()
          + " ,entryEventList=" + printEntryEventList(entryEventList) + " ,txRegionVersionsMap="
          + printEntryEventMap(this.txEntryEventMap) + " ,result= " + localResult
          + " ,finalResult-old= " + finalResult);
    }
    finalResult = finalResult && localResult;
  }
  /*
   * [DISTTX] TODO Any test hooks
   */
  // if (internalAfterIndividualSend != null) {
  // internalAfterIndividualSend.run();
  // }
  /*
   * [DISTTX] TODO see how to handle exception
   */
  /*
   * [DISTTX] TODO Any test hooks
   */
  // if (internalAfterIndividualCommitProcess != null) {
  // // Testing callback
  // internalAfterIndividualCommitProcess.run();
  // }
  {
    // Wait for results
    dm.getCancelCriterion().checkCancelInProgress(null);
    processor.waitForPrecommitCompletion();
    // [DISTTX} TODO Handle stats
    // dm.getStats().incCommitWaits();
    Map<DistributedMember, DistTxPrecommitResponse> remoteResults =
        processor.getCommitResponseMap();
    for (Entry<DistributedMember, DistTxPrecommitResponse> e : remoteResults.entrySet()) {
      DistributedMember target = e.getKey();
      DistTxPrecommitResponse remoteResponse = e.getValue();
      ArrayList<ArrayList<DistTxThinEntryState>> entryEventList =
          remoteResponse.getDistTxEntryEventList();
      populateEntryEventMap(target, entryEventList, sortedRegionName);
      if (logger.isDebugEnabled()) {
        logger.debug("DistTXStateProxyImplOnCoordinator.doPrecommit got reply from target = "
            + target + " ,sortedRegions" + sortedRegionName + " ,entryEventList="
            + printEntryEventList(entryEventList) + " ,txEntryEventMap="
            + printEntryEventMap(this.txEntryEventMap) + " ,result= "
            + remoteResponse.getCommitState() + " ,finalResult-old= " + finalResult);
      }
      finalResult = finalResult && remoteResponse.getCommitState();
    }
  }
  if (logger.isDebugEnabled()) {
    logger.debug("DistTXStateProxyImplOnCoordinator.doPrecommit finalResult= " + finalResult);
  }
  return finalResult;
}
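Both doPrecommit and doCommit fold the per-member responses into a single boolean by AND-ing each member's outcome into finalResult. The same reduction in isolation, over a hypothetical map of per-member results:

// Sketch of the result-aggregation pattern: true only if every DistributedMember succeeded.
static boolean allMembersSucceeded(Map<DistributedMember, Boolean> resultsByMember) {
  boolean finalResult = true;
  for (Map.Entry<DistributedMember, Boolean> e : resultsByMember.entrySet()) {
    finalResult = finalResult && Boolean.TRUE.equals(e.getValue());
  }
  return finalResult;
}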
Use of org.apache.geode.distributed.DistributedMember in project geode by apache.
The class DistTXStateProxyImplOnCoordinator, method getSecondariesAndReplicasForTxOps.
/**
 * creates a map of all secondaries (for PR) / replicas (for RR) to stubs to send commit message
 * to those
 */
private HashMap<DistributedMember, DistTXCoordinatorInterface> getSecondariesAndReplicasForTxOps() {
  final GemFireCacheImpl cache =
      GemFireCacheImpl.getExisting("getSecondariesAndReplicasForTxOps");
  InternalDistributedMember currentNode =
      cache.getInternalDistributedSystem().getDistributedMember();
  HashMap<DistributedMember, DistTXCoordinatorInterface> secondaryTarget2realDeals =
      new HashMap<>();
  for (Entry<DistributedMember, DistTXCoordinatorInterface> e : target2realDeals.entrySet()) {
    DistributedMember originalTarget = e.getKey();
    DistTXCoordinatorInterface distPeerTxStateStub = e.getValue();
    ArrayList<DistTxEntryEvent> primaryTxOps =
        distPeerTxStateStub.getPrimaryTransactionalOperations();
    for (DistTxEntryEvent dtop : primaryTxOps) {
      LocalRegion lr = dtop.getRegion();
      // replicas or secondaries
      Set<InternalDistributedMember> otherNodes = null;
      if (lr instanceof PartitionedRegion) {
        Set<InternalDistributedMember> allNodes = ((PartitionedRegion) dtop.getRegion())
            .getRegionAdvisor().getBucketOwners(dtop.getKeyInfo().getBucketId());
        allNodes.remove(originalTarget);
        otherNodes = allNodes;
      } else if (lr instanceof DistributedRegion) {
        otherNodes =
            ((DistributedRegion) lr).getCacheDistributionAdvisor().adviseInitializedReplicates();
        otherNodes.remove(originalTarget);
      }
      if (otherNodes != null) {
        for (InternalDistributedMember dm : otherNodes) {
          // whether the target already exists due to other Tx op on the node
          DistTXCoordinatorInterface existingDistPeerTXStateStub = target2realDeals.get(dm);
          if (existingDistPeerTXStateStub == null) {
            existingDistPeerTXStateStub = secondaryTarget2realDeals.get(dm);
            if (existingDistPeerTXStateStub == null) {
              DistTXCoordinatorInterface newTxStub = null;
              if (currentNode.equals(dm)) {
                // [DISTTX] TODO add a test case for this condition?
                newTxStub = new DistTXStateOnCoordinator(this, false);
              } else {
                newTxStub = new DistPeerTXStateStub(this, dm, onBehalfOfClientMember);
              }
              newTxStub.addSecondaryTransactionalOperations(dtop);
              secondaryTarget2realDeals.put(dm, newTxStub);
            } else {
              existingDistPeerTXStateStub.addSecondaryTransactionalOperations(dtop);
            }
          } else {
            existingDistPeerTXStateStub.addSecondaryTransactionalOperations(dtop);
          }
        }
      }
    }
  }
  return secondaryTarget2realDeals;
}
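The nested null checks in the inner loop implement a get-or-create per DistributedMember: reuse a stub already registered in target2realDeals or secondaryTarget2realDeals, otherwise build a new one and record it. On Java 8 the secondary-map half of that logic could be condensed with computeIfAbsent; a sketch of an equivalent loop body, using the same names (dm, dtop, currentNode, secondaryTarget2realDeals) as the method above:

// Sketch only: behaviorally equivalent get-or-create for the loop body above.
DistTXCoordinatorInterface stub = target2realDeals.get(dm);
if (stub == null) {
  stub = secondaryTarget2realDeals.computeIfAbsent(dm,
      member -> currentNode.equals(dm) ? new DistTXStateOnCoordinator(this, false)
          : new DistPeerTXStateStub(this, dm, onBehalfOfClientMember));
}
stub.addSecondaryTransactionalOperations(dtop);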