Use of org.apache.geode.InternalGemFireException in project geode by apache.
The class PartitionedRegionDataStore, method removeBucket.
/**
* Removes a redundant bucket hosted by this data store. The rebalancer invokes this method
* directly or sends this member a message to invoke it.
*
* From the spec:
*
* How to Remove a Redundant Bucket
*
* This operation is done by the rebalancer (REB) and can only be done on non-primary buckets. If
* you want to remove a primary bucket, first send one of its peers "become primary" and then send
* it "unhost" (we could offer an "unhost" option on "become primary" or a "becomePrimary" option
* on "create redundant"). The member that hosts the bucket being removed is called the bucket host
* (BH).
*
* 1. REB sends an "unhostBucket" message to BH. BH rejects this message by sending a failure
* reply to REB if it finds itself to be the primary or if it does not host the bucket.
* 2. BH marks itself as "not-hosting". Any read operations that arrive will not start and will
* instead retry. BH also updates the advisor so it knows BH is no longer hosting the bucket.
* 3. BH waits for any in-progress reads (which read ops to wait for is TBD) to complete.
* 4. BH removes the bucket region from its cache.
* 5. BH sends a success reply to REB.
*
* This method is now also used by the PartitionManager. For the PartitionManager, it does remove
* the primary bucket.
*
* @param bucketId the id of the bucket to remove
* @param forceRemovePrimary if true, remove the bucket even if this member is currently the primary
*
* @return true if the bucket was removed; false if the bucket could not be removed or is not hosted
*/
public boolean removeBucket(int bucketId, boolean forceRemovePrimary) {
waitForInProgressBackup();
// don't remove the bucket until we have finished recovering all colocated regions from disk
if (!this.partitionedRegion.getRedundancyProvider().isPersistentRecoveryComplete()) {
if (logger.isDebugEnabled()) {
logger.debug("Returning false from removeBucket because we have not finished recovering all colocated regions from disk");
}
return false;
}
// Lock out bucket creation while doing this :-)
StoppableWriteLock lock = this.bucketCreationLock.writeLock();
lock.lock();
try {
BucketRegion bucketRegion = this.localBucket2RegionMap.get(Integer.valueOf(bucketId));
if (bucketRegion == null) {
if (logger.isDebugEnabled()) {
logger.debug("Returning true from removeBucket because we don't have the bucket we've been told to remove");
}
return true;
}
BucketAdvisor bucketAdvisor = bucketRegion.getBucketAdvisor();
Lock writeLock = bucketAdvisor.getActiveWriteLock();
// Fix for 43613 - don't remove the bucket
// if we are primary. We hold the lock here
// to prevent this member from becoming primary until this
// member is no longer hosting the bucket.
writeLock.lock();
try {
if (!forceRemovePrimary && bucketAdvisor.isPrimary()) {
return false;
}
// recurse down through each tier of colocated children, removing their buckets first
removeBucketForColocatedChildren(bucketId, forceRemovePrimary);
if (bucketRegion.getPartitionedRegion().isShadowPR()) {
if (bucketRegion.getPartitionedRegion().getColocatedWithRegion() != null) {
bucketRegion.getPartitionedRegion().getColocatedWithRegion().getRegionAdvisor().getBucketAdvisor(bucketId).setShadowBucketDestroyed(true);
}
}
bucketAdvisor.getProxyBucketRegion().removeBucket();
} finally {
writeLock.unlock();
}
if (logger.isDebugEnabled()) {
logger.debug("Removed bucket {} from advisor", bucketRegion);
}
// Flush the state of the primary. This makes sure we have processed
// all operations that were sent out before we removed our profile from
// our peers.
//
// Another option, instead of using the StateFlushOperation, could
// be to send a message that waits until it acquires the
// activePrimaryMoveLock on the primary bucket region. That would also
// wait for in-progress writes. I chose to use the StateFlushOperation
// because it won't block write operations while we're trying to acquire
// the activePrimaryMoveLock.
InternalDistributedMember primary = bucketAdvisor.getPrimary();
InternalDistributedMember myId = this.partitionedRegion.getDistributionManager().getDistributionManagerId();
if (!myId.equals(primary)) {
StateFlushOperation flush = new StateFlushOperation(bucketRegion);
int executor = DistributionManager.WAITING_POOL_EXECUTOR;
try {
flush.flush(Collections.singleton(primary), myId, executor, false);
} catch (InterruptedException e) {
this.partitionedRegion.getCancelCriterion().checkCancelInProgress(e);
Thread.currentThread().interrupt();
throw new InternalGemFireException("Interrupted while flushing state");
}
if (logger.isDebugEnabled()) {
logger.debug("Finished state flush for removal of {}", bucketRegion);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("We became primary while destroying the bucket. Too late to stop now.");
}
}
bucketRegion.invokePartitionListenerAfterBucketRemoved();
bucketRegion.preDestroyBucket(bucketId);
bucketRegion.localDestroyRegion();
bucketAdvisor.getProxyBucketRegion().finishRemoveBucket();
if (logger.isDebugEnabled()) {
logger.debug("Destroyed {}", bucketRegion);
}
this.localBucket2RegionMap.remove(Integer.valueOf(bucketId));
this.partitionedRegion.getPrStats().incBucketCount(-1);
return true;
} finally {
lock.unlock();
}
}
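
The InterruptedException handling above follows a common Geode idiom: check whether the cache is shutting down, restore the thread's interrupt flag, and then surface the failure as an unchecked InternalGemFireException. A minimal standalone sketch of the same idiom, with a hypothetical waitForPeers() standing in for StateFlushOperation.flush():

import org.apache.geode.InternalGemFireException;

public class InterruptIdiomSketch {

  // Hypothetical blocking call standing in for StateFlushOperation.flush().
  private static void waitForPeers() throws InterruptedException {
    Thread.sleep(100);
  }

  public static void flushOrFail() {
    try {
      waitForPeers();
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can still observe it,
      // then convert the checked exception into an unchecked one.
      Thread.currentThread().interrupt();
      throw new InternalGemFireException("Interrupted while flushing state", e);
    }
  }
}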
Use of org.apache.geode.InternalGemFireException in project geode by apache.
The class PartitionedRegion, method fetchEntries.
/**
* Fetches entries from local and remote nodes and appends them to the register-interest response.
*/
public void fetchEntries(HashMap<Integer, HashSet> bucketKeys, VersionedObjectList values, ServerConnection servConn) throws IOException {
int retryAttempts = calcRetry();
RetryTimeKeeper retryTime = null;
HashMap<Integer, HashSet> failures = new HashMap<Integer, HashSet>(bucketKeys);
HashMap<InternalDistributedMember, HashMap<Integer, HashSet>> nodeToBuckets = new HashMap<InternalDistributedMember, HashMap<Integer, HashSet>>();
while (--retryAttempts >= 0 && !failures.isEmpty()) {
nodeToBuckets.clear();
updateNodeToBucketMap(nodeToBuckets, failures);
failures.clear();
HashMap<Integer, HashSet> localBuckets = nodeToBuckets.remove(getMyId());
if (localBuckets != null && !localBuckets.isEmpty()) {
Set keys = new HashSet();
for (Integer id : localBuckets.keySet()) {
keys.addAll(localBuckets.get(id));
}
if (!keys.isEmpty()) {
BaseCommand.appendNewRegisterInterestResponseChunkFromLocal(this, values, "keyList", keys, servConn);
}
}
// Handle old nodes for Rolling Upgrade support
Set<Integer> failedSet = handleOldNodes(nodeToBuckets, values, servConn);
// requeue buckets that failed on old nodes so they are fetched from remote nodes
if (!failedSet.isEmpty()) {
for (Integer bId : failedSet) {
failures.put(bId, bucketKeys.get(bId));
}
updateNodeToBucketMap(nodeToBuckets, failures);
failures.clear();
}
fetchRemoteEntries(nodeToBuckets, failures, values, servConn);
if (!failures.isEmpty()) {
if (retryTime == null) {
retryTime = new RetryTimeKeeper(this.retryTimeout);
}
if (!waitForFetchRemoteEntriesRetry(retryTime)) {
break;
}
}
}
if (!failures.isEmpty()) {
throw new InternalGemFireException("Failed to fetch entries from " + failures.size() + " buckets of region " + getName() + " for register interest.");
}
}
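
The retry loop above drains a failures map on each pass and throws InternalGemFireException only once the retry budget is exhausted. A stripped-down sketch of that retry-then-fail shape, where fetchOnce() is a hypothetical single-pass fetch that removes the buckets it served from the map:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import org.apache.geode.InternalGemFireException;

public class FetchRetrySketch {

  // Hypothetical single pass: serve what we can, leave unserved buckets in the map.
  static void fetchOnce(Map<Integer, HashSet> failures) {
    failures.clear(); // pretend everything succeeded
  }

  static void fetchWithRetry(Map<Integer, HashSet> bucketKeys, int retryAttempts) {
    Map<Integer, HashSet> failures = new HashMap<>(bucketKeys);
    while (--retryAttempts >= 0 && !failures.isEmpty()) {
      fetchOnce(failures);
    }
    if (!failures.isEmpty()) {
      // mirror the real method: give up once retries are exhausted
      throw new InternalGemFireException(
          "Failed to fetch entries from " + failures.size() + " buckets");
    }
  }
}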
Use of org.apache.geode.InternalGemFireException in project geode by apache.
The class DLockService, method releaseTryLocks.
public void releaseTryLocks(DLockBatchId batchId, Callable<Boolean> untilCondition) {
final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
if (isDebugEnabled_DLS) {
logger.trace(LogMarker.DLS, "[DLockService.releaseTryLocks] enter: {}", batchId);
}
long statStart = getStats().startLockRelease();
try {
boolean lockBatch = true;
boolean released = false;
while (!released) {
try {
boolean quit = untilCondition.call();
if (quit) {
return;
}
} catch (Exception e) {
throw new InternalGemFireException("unexpected exception", e);
}
checkDestroyed();
LockGrantorId theLockGrantorId = batchId.getLockGrantorId();
synchronized (this.lockGrantorIdLock) {
if (!checkLockGrantorId(theLockGrantorId)) {
// the grantor is different so break and skip DLockReleaseProcessor
break;
}
}
released = callReleaseProcessor(theLockGrantorId.getLockGrantorMember(), batchId, lockBatch, -1);
if (!released) {
notLockGrantorId(theLockGrantorId, 100, TimeUnit.MILLISECONDS);
}
}
} finally {
decActiveLocks();
getStats().endLockRelease(statStart);
if (isDebugEnabled_DLS) {
logger.trace(LogMarker.DLS, "[DLockService.releaseTryLocks] exit: {}", batchId);
}
}
}
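
The untilCondition parameter lets the caller decide when further release attempts are pointless; note that any exception the Callable throws is treated as a programming error and wrapped in InternalGemFireException. A hedged usage sketch, where service, batchId, and cache are assumed to already exist in scope:

import java.util.concurrent.Callable;

// Hypothetical caller: stop retrying the release once the cache is closing.
Callable<Boolean> untilCacheCloses = () -> cache.isClosed();
service.releaseTryLocks(batchId, untilCacheCloses);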
Use of org.apache.geode.InternalGemFireException in project geode by apache.
The class StatArchiveWriter, method allocatedResourceType.
public void allocatedResourceType(ResourceType resourceType) {
if (logger.isTraceEnabled(LogMarker.STATISTICS)) {
logger.trace(LogMarker.STATISTICS, "StatArchiveWriter#allocatedResourceType resourceType={}", resourceType);
}
if (resourceType.getStatisticDescriptors().length >= ILLEGAL_STAT_OFFSET) {
throw new InternalGemFireException(LocalizedStrings.StatArchiveWriter_COULD_NOT_ARCHIVE_TYPE_0_BECAUSE_IT_HAD_MORE_THAN_1_STATISTICS.toLocalizedString(new Object[] { resourceType.getStatisticsType().getName(), Integer.valueOf(ILLEGAL_STAT_OFFSET - 1) }));
}
// write the type to the archive
try {
this.dataOut.writeByte(RESOURCE_TYPE_TOKEN);
this.dataOut.writeInt(resourceType.getId());
this.dataOut.writeUTF(resourceType.getStatisticsType().getName());
this.dataOut.writeUTF(resourceType.getStatisticsType().getDescription());
StatisticDescriptor[] stats = resourceType.getStatisticDescriptors();
this.dataOut.writeShort(stats.length);
if (this.trace && (traceStatisticsTypeName == null || traceStatisticsTypeName.equals(resourceType.getStatisticsType().getName()))) {
this.traceDataOut.println("allocatedResourceType#writeByte RESOURCE_TYPE_TOKEN: " + RESOURCE_TYPE_TOKEN);
this.traceDataOut.println("allocatedResourceType#writeInt resourceType.getId(): " + resourceType.getId());
this.traceDataOut.println("allocatedResourceType#writeUTF resourceType.getStatisticsType().getName(): " + resourceType.getStatisticsType().getName());
this.traceDataOut.println("allocatedResourceType#writeUTF resourceType.getStatisticsType().getDescription(): " + resourceType.getStatisticsType().getDescription());
this.traceDataOut.println("allocatedResourceType#writeShort stats.length: " + stats.length);
}
for (int i = 0; i < stats.length; i++) {
this.dataOut.writeUTF(stats[i].getName());
this.dataOut.writeByte(((StatisticDescriptorImpl) stats[i]).getTypeCode());
this.dataOut.writeBoolean(stats[i].isCounter());
this.dataOut.writeBoolean(stats[i].isLargerBetter());
this.dataOut.writeUTF(stats[i].getUnit());
this.dataOut.writeUTF(stats[i].getDescription());
if (this.trace && (traceStatisticsTypeName == null || traceStatisticsTypeName.equals(resourceType.getStatisticsType().getName()))) {
this.traceDataOut.println("allocatedResourceType#writeUTF stats[i].getName(): " + stats[i].getName());
this.traceDataOut.println("allocatedResourceType#writeByte ((StatisticDescriptorImpl)stats[i]).getTypeCode(): " + ((StatisticDescriptorImpl) stats[i]).getTypeCode());
this.traceDataOut.println("allocatedResourceType#writeBoolean stats[i].isCounter(): " + stats[i].isCounter());
this.traceDataOut.println("allocatedResourceType#writeBoolean stats[i].isLargerBetter(): " + stats[i].isLargerBetter());
this.traceDataOut.println("allocatedResourceType#writeUTF stats[i].getUnit(): " + stats[i].getUnit());
this.traceDataOut.println("allocatedResourceType#writeUTF stats[i].getDescription(): " + stats[i].getDescription());
}
}
} catch (IOException ex) {
throw new GemFireIOException(LocalizedStrings.StatArchiveWriter_FAILED_WRITING_NEW_RESOURCE_TYPE_TO_STATISTIC_ARCHIVE.toLocalizedString(), ex);
}
}
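
Stripped of the trace logging, the record written above is a flat DataOutput sequence: a one-byte token, the type id, two UTF strings, a short descriptor count, then six fields per statistic. A self-contained sketch of the same layout using java.io.DataOutputStream (the token and type-code values are placeholders, not Geode's real constants):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ResourceTypeRecordSketch {

  static byte[] encode() throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeByte(1);                  // RESOURCE_TYPE_TOKEN (actual value assumed)
    out.writeInt(42);                  // resource type id
    out.writeUTF("ExampleStats");      // statistics type name
    out.writeUTF("an example type");   // statistics type description
    out.writeShort(1);                 // number of statistic descriptors
    // per descriptor: name, type code, counter?, larger-better?, unit, description
    out.writeUTF("puts");
    out.writeByte(4);                  // type code (actual value assumed)
    out.writeBoolean(true);            // isCounter
    out.writeBoolean(true);            // isLargerBetter
    out.writeUTF("operations");
    out.writeUTF("number of puts");
    out.flush();
    return bytes.toByteArray();
  }
}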
Use of org.apache.geode.InternalGemFireException in project geode by apache.
The class PRQueryProcessor, method executeWithThreadPool.
private void executeWithThreadPool(Collection<Collection> resultCollector) throws QueryException, InterruptedException, ForceReattemptException {
if (Thread.interrupted())
throw new InterruptedException();
java.util.List callableTasks = buildCallableTaskList(resultCollector);
ExecutorService execService = PRQueryExecutor.getExecutorService();
boolean reattemptNeeded = false;
ForceReattemptException fre = null;
if (callableTasks != null && !callableTasks.isEmpty()) {
// invokeAll blocks until every task completes or the 300-second deadline passes
List futures = execService.invokeAll(callableTasks, 300, TimeUnit.SECONDS);
if (futures != null) {
Iterator itr = futures.iterator();
while (itr.hasNext() && !execService.isShutdown() && !execService.isTerminated()) {
// this._prds.partitionedRegion.checkReadiness();
Future fut = (Future) itr.next();
QueryTask.BucketQueryResult bqr = null;
try {
bqr = (QueryTask.BucketQueryResult) fut.get(BUCKET_QUERY_TIMEOUT, TimeUnit.SECONDS);
// handle an exception if there was one
bqr.handleAndThrowException();
if (bqr.retry) {
reattemptNeeded = true;
// remember an exception to rethrow; without this, the "throw fre" at the
// end of this method would throw null whenever a reattempt is needed
if (fre == null) {
fre = new ForceReattemptException("bucket query must be reattempted");
}
}
} catch (TimeoutException e) {
throw new InternalGemFireException(LocalizedStrings.PRQueryProcessor_TIMED_OUT_WHILE_EXECUTING_QUERY_TIME_EXCEEDED_0.toLocalizedString(BUCKET_QUERY_TIMEOUT), e);
} catch (ExecutionException ee) {
Throwable cause = ee.getCause();
if (cause instanceof QueryException) {
throw (QueryException) cause;
} else {
throw new InternalGemFireException(LocalizedStrings.PRQueryProcessor_GOT_UNEXPECTED_EXCEPTION_WHILE_EXECUTING_QUERY_ON_PARTITIONED_REGION_BUCKET.toLocalizedString(), cause);
}
}
}
CompiledSelect cs = this.query.getSimpleSelect();
if (cs != null && (cs.isOrderBy() || cs.isGroupBy())) {
ExecutionContext context = new QueryExecutionContext(this.parameters, pr.getCache());
int limit = this.query.getLimit(parameters);
Collection mergedResults = coalesceOrderedResults(resultCollector, context, cs, limit);
resultCollector.clear();
resultCollector.add(mergedResults);
}
}
}
if (execService == null || execService.isShutdown() || execService.isTerminated()) {
this._prds.partitionedRegion.checkReadiness();
}
if (reattemptNeeded) {
throw fre;
}
}
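
The future handling above is standard java.util.concurrent usage: invokeAll with an overall deadline, a per-future get with its own timeout, and unwrapping ExecutionException to recover the task's real failure. A generic self-contained sketch of the same pattern (the timeouts are illustrative):

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class InvokeAllSketch {

  static void runAll(ExecutorService pool, List<Callable<Integer>> tasks)
      throws InterruptedException {
    // overall deadline: tasks not finished in 300 seconds are cancelled
    List<Future<Integer>> futures = pool.invokeAll(tasks, 300, TimeUnit.SECONDS);
    for (Future<Integer> future : futures) {
      try {
        // per-task budget for collecting the result
        Integer result = future.get(60, TimeUnit.SECONDS);
        System.out.println("result: " + result);
      } catch (TimeoutException e) {
        throw new RuntimeException("task timed out", e);
      } catch (ExecutionException e) {
        // unwrap to rethrow the task's underlying failure
        throw new RuntimeException("task failed", e.getCause());
      }
    }
  }
}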