Use of org.apache.geode.internal.cache.versions.RegionVersionVector in the Apache Geode project: class Oplog, method basicSaveConflictVersionTag.
/**
 * Persists a conflict-resolution version tag for the given disk region into this oplog's
 * CRF. If this oplog is no longer the current child, or appending the record would
 * overflow the CRF, the write is retried on the next oplog. The region version vector is
 * updated under the same lock as the write so the RVV stays consistent with this oplog's
 * contents.
 *
 * @param dr the disk region view the version tag belongs to
 * @param tag the conflicting version tag to persist
 * @param async true if the bytes may be written asynchronously
 * @throws IOException if writing to the oplog fails
 * @throws InterruptedException if interrupted while writing
 */
private void basicSaveConflictVersionTag(DiskRegionView dr, VersionTag tag, boolean async) throws IOException, InterruptedException {
  boolean useNextOplog = false;
  int adjustment = 0;
  synchronized (this.lock) {
    // Another thread may have rolled to a new oplog; if so, retry on the new child below.
    if (getOplogSet().getChild() != this) {
      useNextOplog = true;
    } else {
      this.opState.initialize(OPLOG_CONFLICT_VERSION, dr.getId(), tag);
      adjustment = getOpStateSize();
      assert adjustment > 0;
      long temp = (this.crf.currSize + adjustment);
      if (temp > getMaxCrfSize() && !isFirstRecord()) {
        // Record would overflow the CRF: switch to a new oplog and retry there.
        switchOpLog(dr, adjustment, null);
        // we can't reuse it since it contains variable length data
        useNextOplog = true;
      } else {
        // NOTE(review): lockedForKRFcreate is treated here as "disk store closed",
        // per the exception message — confirm that is its actual meaning.
        if (this.lockedForKRFcreate) {
          CacheClosedException cce = new CacheClosedException("The disk store is closed.");
          dr.getCancelCriterion().checkCancelInProgress(cce);
          throw cce;
        }
        this.firstRecord = false;
        writeOpLogBytes(this.crf, async, true);
        this.crf.currSize = temp;
        if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
          logger.trace(LogMarker.PERSIST_WRITES, "basicSaveConflictVersionTag: drId={} versionStamp={} oplog#{}", dr.getId(), tag, getOplogId());
        }
        this.dirHolder.incrementTotalOplogSize(adjustment);
        // Update the region version vector for the disk store.
        // This needs to be done under lock so that we don't switch oplogs
        // until the version vector accurately represents what is in this oplog
        RegionVersionVector rvv = dr.getRegionVersionVector();
        if (rvv != null && dr.getFlags().contains(DiskRegionFlag.IS_WITH_VERSIONING)) {
          rvv.recordVersion(tag.getMemberID(), tag.getRegionVersion());
        }
      }
      clearOpState();
    }
  }
  if (useNextOplog) {
    if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
      CacheObserverHolder.getInstance().afterSwitchingOplog();
    }
    // The child must have changed by now, otherwise the retry would recurse forever.
    Assert.assertTrue(getOplogSet().getChild() != this);
    getOplogSet().getChild().basicSaveConflictVersionTag(dr, tag, async);
  }
}
Use of org.apache.geode.internal.cache.versions.RegionVersionVector in the Apache Geode project: class DistributedRegion, method lockLocallyForClear.
/**
 * Pauses local operations so that a clear() can be performed, and flushes communication
 * channels to cache-op members when the region scope is distributed-no-ack.
 *
 * When the region has no version vector, no locking or flushing is done (only the test
 * hooks fire). The readiness check runs after the RVV lock is taken so a clear does not
 * proceed on a destroyed region.
 *
 * @param dm the distribution manager used when locking the version vector
 * @param locker the member performing the clear
 * @param event the event triggering the clear (passed to the ARM lock test hook)
 */
void lockLocallyForClear(DM dm, InternalDistributedMember locker, CacheEvent event) {
  RegionVersionVector rvv = getVersionVector();
  ARMLockTestHook armLockTestHook = getRegionMap().getARMLockTestHook();
  if (armLockTestHook != null) {
    armLockTestHook.beforeLock(this, event);
  }
  if (rvv != null) {
    // block new operations from being applied to the region map
    rvv.lockForClear(getFullPath(), dm, locker);
    // Check for region destroyed after we have locked, to make sure
    // we don't continue a clear if the region has been destroyed.
    checkReadiness();
    // Only need to flush if NOACK at this point
    if (this.getAttributes().getScope().isDistributedNoAck()) {
      Set<InternalDistributedMember> members = getDistributionAdvisor().adviseCacheOp();
      StateFlushOperation.flushTo(members, this);
    }
  }
  if (armLockTestHook != null) {
    // NOTE(review): beforeLock receives the event but afterLock is given null —
    // confirm whether this asymmetry is intentional.
    armLockTestHook.afterLock(this, null);
  }
}
Use of org.apache.geode.internal.cache.versions.RegionVersionVector in the Apache Geode project: class DistTXState, method updateRegionVersions.
/*
* If this is a primary member, for each entry in TXState, generate next region version and store
* in the entry.
*/
/**
 * If this is a primary member, for each entry in TXState, generate the next region version
 * and store it in the entry's distributed-TX state.
 *
 * Regions whose TXRegionState was created during commit are skipped (version generation
 * happens only on the primary). A DiskAccessException from version generation is handed to
 * the region for handling and then rethrown.
 */
public void updateRegionVersions() {
  Iterator<Map.Entry<LocalRegion, TXRegionState>> it = this.regions.entrySet().iterator();
  while (it.hasNext()) {
    Map.Entry<LocalRegion, TXRegionState> me = it.next();
    LocalRegion r = me.getKey();
    TXRegionState txrs = me.getValue();
    // Generate next region version only on the primary
    if (!txrs.isCreatedDuringCommit()) {
      try {
        Set entries = txrs.getEntryKeys();
        if (!entries.isEmpty()) {
          // The version vector is per-region, so look it up once instead of per entry.
          RegionVersionVector rvv = r.getVersionVector();
          Iterator entryIt = entries.iterator();
          while (entryIt.hasNext()) {
            Object key = entryIt.next();
            TXEntryState txes = txrs.getTXEntryState(key);
            if (rvv != null) {
              long v = rvv.getNextVersion();
              // txes.setNextRegionVersion(v);
              txes.getDistTxEntryStates().setRegionVersion(v);
              if (logger.isDebugEnabled()) {
                // Parameterized form; also fixes the missing spaces the concatenated
                // message had around "in TXEntryState for key".
                logger.debug("Set next region version to {} for region={} in TXEntryState for key {}", v, r.getName(), key);
              }
            }
          }
        }
      } catch (DiskAccessException dae) {
        // Let the region react (e.g. take itself offline) before propagating.
        r.handleDiskAccessException(dae);
        throw dae;
      }
    }
  }
}
Use of org.apache.geode.internal.cache.versions.RegionVersionVector in the Apache Geode project: class AbstractRegionMap, method clear.
/**
* Clear the region and, if an RVV is given, return a collection of the version sources in all
* remaining tags
*/
/**
 * Clear the region and, if an RVV is given, return a collection of the version sources in all
 * remaining tags.
 *
 * <p>With a null RVV the entire map is dropped and stats adjusted. With a non-null RVV only
 * entries whose version is contained in the RVV are removed; the version sources of retained
 * entries are returned so the caller knows which members still have live data here.
 *
 * @param rvv drives a selective, version-based clear when non-null; full clear when null
 * @return version sources of entries retained by a selective clear, or null when the owner
 *         is not a local region (initialization failed)
 */
public Set<VersionSource> clear(RegionVersionVector rvv) {
  Set<VersionSource> result = new HashSet<VersionSource>();
  if (!_isOwnerALocalRegion()) {
    // Fix for #41333. Just clear the map
    // if we failed during initialization.
    _mapClear();
    return null;
  }
  if (logger.isDebugEnabled()) {
    // BUGFIX: the second argument used to be " rvv=" + rvv, which doubled the
    // "rvv=" prefix in the emitted message ("... rvv= rvv=..."); pass the vector itself.
    logger.debug("Clearing entries for {} rvv={}", _getOwner(), rvv);
  }
  LocalRegion lr = _getOwner();
  RegionVersionVector localRvv = lr.getVersionVector();
  incClearCount(lr);
  // lock for size calcs if the region might have tombstones
  Object lockObj = lr.getConcurrencyChecksEnabled() ? lr.getSizeGuard() : new Object();
  synchronized (lockObj) {
    if (rvv == null) {
      // Unversioned clear: drop the whole map and adjust size/tombstone stats.
      int delta = 0;
      try {
        // TODO soplog need to determine if stats should
        delta = sizeInVM();
        // reflect only size in memory or the complete thing
      } catch (GemFireIOException e) {
        // ignore rather than throwing an exception during cache close
      }
      int tombstones = lr.getTombstoneCount();
      _mapClear();
      _getOwner().updateSizeOnClearRegion(delta - tombstones);
      _getOwner().incTombstoneCount(-tombstones);
      if (delta != 0) {
        incEntryCount(-delta);
      }
    } else {
      // Versioned clear: remove only entries whose version the RVV contains.
      int delta = 0;
      int tombstones = 0;
      VersionSource myId = _getOwner().getVersionMember();
      if (localRvv != rvv) {
        localRvv.recordGCVersions(rvv);
      }
      final boolean isTraceEnabled = logger.isTraceEnabled();
      for (RegionEntry re : regionEntries()) {
        synchronized (re) {
          Token value = re.getValueAsToken();
          // if it's already being removed or the entry is being created we leave it alone
          if (value == Token.REMOVED_PHASE1 || value == Token.REMOVED_PHASE2) {
            continue;
          }
          VersionSource id = re.getVersionStamp().getMemberID();
          if (id == null) {
            id = myId;
          }
          if (rvv.contains(id, re.getVersionStamp().getRegionVersion())) {
            if (isTraceEnabled) {
              logger.trace("region clear op is removing {} {}", re.getKey(), re.getVersionStamp());
            }
            boolean tombstone = re.isTombstone();
            // note: it.remove() did not reliably remove the entry so we use remove(K,V) here
            if (_getMap().remove(re.getKey(), re)) {
              if (OffHeapRegionEntryHelper.doesClearNeedToCheckForOffHeap()) {
                // OFFHEAP _getValue ok
                GatewaySenderEventImpl.release(re._getValue());
              }
              // disk at this point.
              try {
                re.removePhase1(lr, true);
              } catch (RegionClearedException e) {
                // do nothing, it's already cleared.
              }
              re.removePhase2();
              lruEntryDestroy(re);
              if (tombstone) {
                _getOwner().incTombstoneCount(-1);
                tombstones += 1;
              } else {
                delta += 1;
              }
            }
          } else {
            // rvv does not contain this entry so it is retained
            result.add(id);
          }
        }
      }
      _getOwner().updateSizeOnClearRegion(delta);
      incEntryCount(-delta);
      incEntryCount(-tombstones);
      if (logger.isDebugEnabled()) {
        logger.debug("Size after clearing = {}", _getMap().size());
      }
      if (isTraceEnabled && _getMap().size() < 20) {
        _getOwner().dumpBackingMap();
      }
    }
  }
  return result;
}
Use of org.apache.geode.internal.cache.versions.RegionVersionVector in the Apache Geode project: class AbstractRegionMap, method removeTombstone.
/**
 * Removes a tombstone that has expired locally.
 *
 * The entry is only removed if it is still a tombstone and its entry version has not
 * advanced past the version recorded at expiry time (i.e. it was not resurrected by a
 * newer operation). On successful removal the owner's GC version is recorded in the
 * region version vector.
 *
 * NOTE(review): the isEviction parameter is not referenced in this body — confirm
 * whether callers rely on it elsewhere or it is vestigial.
 *
 * @param re the region entry holding the tombstone
 * @param version the version captured when the tombstone was scheduled for removal
 * @param isEviction unused in this body
 * @param isScheduledTombstone true if the tombstone was counted in the owner's tombstone count
 * @return true if the tombstone was actually removed
 */
public boolean removeTombstone(RegionEntry re, VersionHolder version, boolean isEviction, boolean isScheduledTombstone) {
  boolean result = false;
  int destroyedVersion = version.getEntryVersion();
  synchronized (this._getOwner().getSizeGuard()) {
    // do this sync first; see bug 51985
    synchronized (re) {
      int entryVersion = re.getVersionStamp().getEntryVersion();
      // Resurrection check: a newer version (or non-tombstone state) means the entry
      // came back to life after expiry was scheduled, so leave it alone.
      if (!re.isTombstone() || entryVersion > destroyedVersion) {
        if (logger.isTraceEnabled(LogMarker.TOMBSTONE_COUNT)) {
          logger.trace(LogMarker.TOMBSTONE_COUNT, "tombstone for {} was resurrected with v{}; destroyed version was v{}; count is {}; entryMap size is {}", re.getKey(), re.getVersionStamp().getEntryVersion(), destroyedVersion, this._getOwner().getTombstoneCount(), size());
        }
      } else {
        if (logger.isTraceEnabled(LogMarker.TOMBSTONE_COUNT)) {
          if (entryVersion == destroyedVersion) {
            // logging this can put tremendous pressure on the log writer in tests
            // that "wait for silence"
            logger.trace(LogMarker.TOMBSTONE_COUNT, "removing tombstone for {} with v{} rv{}; count is {}", re.getKey(), destroyedVersion, version.getRegionVersion(), (this._getOwner().getTombstoneCount() - 1));
          } else {
            logger.trace(LogMarker.TOMBSTONE_COUNT, "removing entry (v{}) that is older than an expiring tombstone (v{} rv{}) for {}", entryVersion, destroyedVersion, version.getRegionVersion(), re.getKey());
          }
        }
        try {
          re.setValue(_getOwner(), Token.REMOVED_PHASE2);
          if (removeTombstone(re)) {
            _getOwner().cancelExpiryTask(re);
            result = true;
            incEntryCount(-1);
            // lruEntryDestroy(re); // tombstones are invisible to LRU
            if (isScheduledTombstone) {
              _getOwner().incTombstoneCount(-1);
            }
            // Record the GC version so the RVV knows this tombstone was collected.
            RegionVersionVector vector = _getOwner().getVersionVector();
            if (vector != null) {
              vector.recordGCVersion(version.getMemberID(), version.getRegionVersion());
            }
          }
        } catch (RegionClearedException e) {
          // if the region has been cleared we don't need to remove the tombstone
        } catch (RegionDestroyedException e) {
          // if the region has been destroyed, the tombstone is already
          // gone. Catch an exception to avoid an error from the GC thread.
        }
      }
    }
  }
  return result;
}
Aggregations