Usage example of org.apache.geode.cache.DiskAccessException in the Apache Geode project.
From the class DiskStoreCompacter, method main:
/**
 * Entry point for offline disk-store compaction, invoked by gfsh as a separate JVM.
 * Expects three "key=value" arguments: the disk store name, a comma-separated list
 * of disk directories, and the maximum oplog size. Any failure is reported on
 * stderr as a formatted error string followed by the stack-trace marker and trace.
 */
public static void main(String[] args) {
  String errorString = null;
  String stackTraceString = null;
  String diskStoreName = null;
  String diskDirsStr = null;
  String[] diskDirs = null;
  String maxOpLogSize = null;
  long maxOplogSize = -1;
  try {
    if (args.length < 3) {
      throw new IllegalArgumentException("Requires 3 arguments : <diskStoreName> <diskDirs> <maxOplogSize>");
    }
    Properties prop = new Properties();
    try {
      // Each argument is a "key=value" pair; joining them with the gfsh line
      // separator lets Properties.load parse all three in one pass.
      prop.load(new StringReader(args[0] + GfshParser.LINE_SEPARATOR + args[1] + GfshParser.LINE_SEPARATOR + args[2]));
    } catch (IOException e) {
      throw new IllegalArgumentException("Requires 3 arguments : <diskStoreName> <diskDirs> <maxOplogSize>");
    }
    diskStoreName = prop.getProperty(CliStrings.COMPACT_OFFLINE_DISK_STORE__NAME);
    diskDirsStr = prop.getProperty(CliStrings.COMPACT_OFFLINE_DISK_STORE__DISKDIRS);
    diskDirs = diskDirsStr.split(",");
    maxOpLogSize = prop.getProperty(CliStrings.COMPACT_OFFLINE_DISK_STORE__MAXOPLOGSIZE);
    // parseLong: a primitive long is wanted, so avoid Long.valueOf's boxing.
    // A malformed number throws NumberFormatException, handled by the
    // IllegalArgumentException catch below.
    maxOplogSize = Long.parseLong(maxOpLogSize);
    compact(diskStoreName, diskDirs, maxOplogSize);
  } catch (GemFireIOException e) {
    // Map well-known low-level failures onto user-friendly CLI messages.
    Throwable cause = e.getCause();
    if (cause instanceof IllegalStateException) {
      String message = cause.getMessage();
      if (stringMatches(LocalizedStrings.DiskInitFile_THE_INIT_FILE_0_DOES_NOT_EXIST.toLocalizedString("(.*)"), message)) {
        errorString = CliStrings.format(CliStrings.COMPACT_OFFLINE_DISK_STORE__MSG__VERIFY_WHETHER_DISKSTORE_EXISTS_IN_0, CliUtil.arrayToString(diskDirs));
      } else {
        errorString = message;
      }
    } else if (cause instanceof DiskAccessException) {
      boolean isKnownCause = false;
      Throwable nestedCause = cause.getCause();
      if (nestedCause instanceof IOException) {
        String message = nestedCause.getMessage();
        // "File in use" means the disk store is owned by a live member; suggest
        // online compaction instead.
        if (stringMatches(LocalizedStrings.Oplog_THE_FILE_0_IS_BEING_USED_BY_ANOTHER_PROCESS.toLocalizedString("(.*)"), message)) {
          errorString = CliStrings.COMPACT_OFFLINE_DISK_STORE__MSG__DISKSTORE_IN_USE_COMPACT_DISKSTORE_CAN_BE_USED;
          isKnownCause = true;
        }
      }
      if (!isKnownCause) {
        errorString = CliStrings.format(CliStrings.COMPACT_OFFLINE_DISK_STORE__MSG__CANNOT_ACCESS_DISKSTORE_0_FROM_1_CHECK_GFSH_LOGS, new Object[] { diskStoreName, CliUtil.arrayToString(diskDirs) });
      }
    } else {
      // which are other known exceptions?
      errorString = e.getMessage();
    }
    stackTraceString = CliUtil.stackTraceAsString(e);
  } catch (IllegalArgumentException e) {
    // Covers both bad argument count/format and NumberFormatException from parseLong.
    errorString = e.getMessage();
    stackTraceString = CliUtil.stackTraceAsString(e);
  } finally {
    // Emit whatever diagnostics we collected; gfsh parses this output.
    if (errorString != null) {
      System.err.println(errorString);
    }
    if (stackTraceString != null) {
      System.err.println(STACKTRACE_START);
      System.err.println(stackTraceString);
    }
  }
}
Usage example of org.apache.geode.cache.DiskAccessException in the Apache Geode project.
From the class DiskStoreUpgrader, method main:
/**
 * Entry point for offline disk-store upgrade, invoked by gfsh as a separate JVM.
 * Expects three "key=value" arguments: the disk store name, a comma-separated list
 * of disk directories, and the maximum oplog size.
 *
 * <p>Argument validation and parsing now happen inside the try block so that an
 * IllegalArgumentException is reported on stderr via errorString/stackTraceString
 * (mirroring DiskStoreCompacter.main) instead of escaping main uncaught.
 */
public static void main(String[] args) {
  String errorString = null;
  String stackTraceString = null;
  String diskStoreName = null;
  String diskDirsStr = null;
  String[] diskDirs = null;
  String maxOpLogSize = null;
  long maxOplogSize = -1;
  try {
    if (args.length < 3) {
      throw new IllegalArgumentException("Requires 3 arguments : <diskStoreName> <diskDirs> <maxOpLogSize>");
    }
    Properties prop = new Properties();
    try {
      // Each argument is a "key=value" pair; joining them with the gfsh line
      // separator lets Properties.load parse all three in one pass.
      prop.load(new StringReader(args[0] + GfshParser.LINE_SEPARATOR + args[1] + GfshParser.LINE_SEPARATOR + args[2]));
    } catch (IOException e) {
      throw new IllegalArgumentException("Requires 3 arguments : <diskStoreName> <diskDirs> <maxOpLogSize>");
    }
    diskStoreName = prop.getProperty(CliStrings.UPGRADE_OFFLINE_DISK_STORE__NAME);
    diskDirsStr = prop.getProperty(CliStrings.UPGRADE_OFFLINE_DISK_STORE__DISKDIRS);
    diskDirs = diskDirsStr.split(",");
    maxOpLogSize = prop.getProperty(CliStrings.UPGRADE_OFFLINE_DISK_STORE__MAXOPLOGSIZE);
    // parseLong: a primitive long is wanted, so avoid Long.valueOf's boxing.
    maxOplogSize = Long.parseLong(maxOpLogSize);
    upgrade(diskStoreName, diskDirs, maxOplogSize);
  } catch (GemFireIOException e) {
    // Map well-known low-level failures onto user-friendly CLI messages.
    Throwable cause = e.getCause();
    if (cause instanceof IllegalStateException) {
      String message = cause.getMessage();
      if (stringMatches(LocalizedStrings.DiskInitFile_THE_INIT_FILE_0_DOES_NOT_EXIST.toLocalizedString("(.*)"), message)) {
        errorString = CliStrings.format(CliStrings.UPGRADE_OFFLINE_DISK_STORE__MSG__CANNOT_LOCATE_0_DISKSTORE_IN_1, diskStoreName, CliUtil.arrayToString(diskDirs));
      } else {
        errorString = message;
      }
    } else if (cause instanceof DiskAccessException) {
      boolean isKnownCause = false;
      Throwable nestedCause = cause.getCause();
      if (nestedCause instanceof IOException) {
        String message = nestedCause.getMessage();
        // "File in use" means the disk store is owned by a live member.
        if (stringMatches(LocalizedStrings.Oplog_THE_FILE_0_IS_BEING_USED_BY_ANOTHER_PROCESS.toLocalizedString("(.*)"), message)) {
          errorString = CliStrings.UPGRADE_OFFLINE_DISK_STORE__MSG__DISKSTORE_IN_USE_COMPACT_DISKSTORE_CAN_BE_USED;
          isKnownCause = true;
        }
      }
      if (!isKnownCause) {
        errorString = CliStrings.format(CliStrings.UPGRADE_OFFLINE_DISK_STORE__MSG__CANNOT_ACCESS_DISKSTORE_0_FROM_1_CHECK_GFSH_LOGS, new Object[] { diskStoreName, CliUtil.arrayToString(diskDirs) });
      }
    } else {
      // which are other known exceptions?
      errorString = e.getMessage();
    }
    stackTraceString = CliUtil.stackTraceAsString(e);
  } catch (IllegalArgumentException e) {
    // Covers bad argument count/format and NumberFormatException from parseLong.
    errorString = e.getMessage();
    stackTraceString = CliUtil.stackTraceAsString(e);
  } finally {
    // Emit whatever diagnostics we collected; gfsh parses this output.
    if (errorString != null) {
      System.err.println(errorString);
    }
    if (stackTraceString != null) {
      System.err.println(STACKTRACE_START);
      System.err.println(stackTraceString);
    }
  }
}
Usage example of org.apache.geode.cache.DiskAccessException in the Apache Geode project.
From the class DistributedRegion, method initialize:
/**
 * Called while NOT holding the lock on the parent's subregions.
 *
 * <p>Initializes this distributed region: sets up the region version vector and lock
 * service when required, performs disk recovery when needed, creates OQL indexes, and
 * then either runs getInitialImage (replicated/preloaded data policies) or just
 * announces the region to peers — optionally loading a snapshot stream.
 *
 * @param snapshotInputStream optional snapshot to load when no GII is performed
 * @param imageTarget preferred member to obtain the initial image from
 * @param internalRegionArgs internal construction arguments (also used for index creation)
 * @throws IllegalStateException if region is not compatible with a region in another VM.
 */
@Override
protected void initialize(InputStream snapshotInputStream, InternalDistributedMember imageTarget, InternalRegionArguments internalRegionArgs) throws TimeoutException, IOException, ClassNotFoundException {
Assert.assertTrue(!isInitialized());
if (logger.isDebugEnabled()) {
logger.debug("DistributedRegion.initialize BEGIN: {}", getFullPath());
}
// if we're versioning entries we need a region-level version vector
if (this.scope.isDistributed() && this.concurrencyChecksEnabled) {
createVersionVector();
}
if (this.scope.isGlobal()) {
// create lock service eagerly now
getLockService();
}
try {
try {
PersistentMemberID persistentMemberId = null;
boolean recoverFromDisk = isRecoveryNeeded();
DiskRegion dskRgn = getDiskRegion();
if (recoverFromDisk) {
if (logger.isDebugEnabled()) {
logger.debug("DistributedRegion.getInitialImageAndRecovery: Starting Recovery");
}
// do recovery
dskRgn.initializeOwner(this);
if (logger.isDebugEnabled()) {
logger.debug("DistributedRegion.getInitialImageAndRecovery: Finished Recovery");
}
// Capture our persistent identity AFTER recovery so GII can advertise it.
persistentMemberId = dskRgn.getMyPersistentID();
}
// Create OQL indexes before starting GII.
createOQLIndexes(internalRegionArgs, recoverFromDisk);
if (getDataPolicy().withReplication() || getDataPolicy().withPreloaded()) {
// Replicated/preloaded regions pull a full initial image (and reconcile
// with whatever was recovered from disk).
getInitialImageAndRecovery(snapshotInputStream, imageTarget, internalRegionArgs, recoverFromDisk, persistentMemberId);
} else {
// Non-replicated: just announce the region to peers; no image transfer.
new CreateRegionProcessor(this).initializeRegion();
if (snapshotInputStream != null) {
// Latch must be released before loading the snapshot — presumably so
// readers blocked on GII are not deadlocked; TODO confirm.
releaseBeforeGetInitialImageLatch();
loadSnapshotDuringInitialization(snapshotInputStream);
}
}
} catch (DiskAccessException dae) {
// Disk failure during recovery/GII: flag the region's disk problem before
// propagating so callers see a consistent region state.
this.handleDiskAccessException(dae, true);
throw dae;
}
initMembershipRoles();
this.isInitializingThread = false;
// makes sure all latches are released if they haven't been already
super.initialize(null, null, null);
} finally {
// Always mark the event tracker initialized, even on failure, so waiters
// are not blocked forever.
if (this.eventTracker != null) {
this.eventTracker.setInitialized();
}
}
}
Usage example of org.apache.geode.cache.DiskAccessException in the Apache Geode project.
From the class DistTXState, method updateRegionVersions:
/*
 * If this is a primary member, for each entry in TXState, generate the next region
 * version and store it in the entry's distributed-TX state.
 */
public void updateRegionVersions() {
  for (Map.Entry<LocalRegion, TXRegionState> regionMapping : this.regions.entrySet()) {
    LocalRegion region = regionMapping.getKey();
    TXRegionState regionState = regionMapping.getValue();
    // Versions are generated only on the primary; skip regions that were
    // created as a side effect of the commit itself.
    if (regionState.isCreatedDuringCommit()) {
      continue;
    }
    try {
      Set keys = regionState.getEntryKeys();
      if (keys.isEmpty()) {
        continue;
      }
      for (Object key : keys) {
        TXEntryState entryState = regionState.getTXEntryState(key);
        RegionVersionVector versionVector = region.getVersionVector();
        if (versionVector == null) {
          continue;
        }
        long nextVersion = versionVector.getNextVersion();
        // txes.setNextRegionVersion(v);
        entryState.getDistTxEntryStates().setRegionVersion(nextVersion);
        if (logger.isDebugEnabled()) {
          logger.debug("Set next region version to " + nextVersion + " for region=" + region.getName() + "in TXEntryState for key" + key);
        }
      }
    } catch (DiskAccessException dae) {
      // Record the disk failure against the region before rethrowing.
      region.handleDiskAccessException(dae);
      throw dae;
    }
  }
}
Usage example of org.apache.geode.cache.DiskAccessException in the Apache Geode project.
From the class AbstractRegionMap, method invalidate:
/**
 * Invalidates the region entry for the event's key.
 *
 * <p>Two main paths: when {@code forceNewEntry} or {@code forceCallbacks} is set, a
 * fresh REMOVED_PHASE1 entry is created and swapped in (retrying around concurrent
 * phase-2 removals); otherwise the existing entry is invalidated in place, with a
 * retry loop around tombstone state changes. Version tags, server round-trips, CQ
 * old-value capture, LRU bookkeeping and clear-race handling are all interleaved —
 * statement order here is load-bearing; do not reorder.
 *
 * @param event the invalidate event (key, version tag, origin)
 * @param invokeCallbacks whether part2/part3 callbacks should be invoked
 * @param forceNewEntry create an entry even if none exists
 * @param forceCallbacks take the new-entry path even without forceNewEntry
 * @return true if an entry was actually invalidated
 * @throws EntryNotFoundException if no entry existed and none was forced
 */
public boolean invalidate(EntryEventImpl event, boolean invokeCallbacks, boolean forceNewEntry, boolean forceCallbacks) throws EntryNotFoundException {
final boolean isDebugEnabled = logger.isDebugEnabled();
final LocalRegion owner = _getOwner();
if (owner == null) {
// "fix" for bug 32440
Assert.assertTrue(false, "The owner for RegionMap " + this + " is null for event " + event);
}
boolean didInvalidate = false;
RegionEntry invalidatedRe = null;
boolean clearOccured = false;
DiskRegion dr = owner.getDiskRegion();
boolean ownerIsInitialized = owner.isInitialized();
try {
// Fix for Bug #44431. We do NOT want to update the region and wait
// later for index INIT as region.clear() can cause inconsistency if
// happened in parallel as it also does index INIT.
IndexManager oqlIndexManager = owner.getIndexManager();
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
lockForCacheModification(owner, event);
try {
try {
// Path 1: force creation of a new entry (or forced callbacks).
if (forceNewEntry || forceCallbacks) {
boolean opCompleted = false;
RegionEntry newRe = getEntryFactory().createEntry(owner, event.getKey(), Token.REMOVED_PHASE1);
synchronized (newRe) {
try {
RegionEntry oldRe = putEntryIfAbsent(event.getKey(), newRe);
// Loop until we either complete against a stable old entry or win the
// putIfAbsent race with our new entry.
while (!opCompleted && oldRe != null) {
synchronized (oldRe) {
// proceed to phase 2 of removal.
if (oldRe.isRemovedPhase2()) {
owner.getCachePerfStats().incRetries();
_getMap().remove(event.getKey(), oldRe);
oldRe = putEntryIfAbsent(event.getKey(), newRe);
} else {
opCompleted = true;
event.setRegionEntry(oldRe);
if (oldRe.isDestroyed()) {
if (isDebugEnabled) {
logger.debug("mapInvalidate: Found DESTROYED token, not invalidated; key={}", event.getKey());
}
} else if (oldRe.isInvalid()) {
// was already invalid, do not invoke listeners or increment stat
if (isDebugEnabled) {
logger.debug("mapInvalidate: Entry already invalid: '{}'", event.getKey());
}
processVersionTag(oldRe, event);
try {
// OFFHEAP noop setting
oldRe.setValue(owner, oldRe.getValueInVM(owner));
// an already invalid to
// invalid; No need to
// call
// prepareValueForCache
// since it is an
// invalid token.
} catch (RegionClearedException e) {
// that's okay - when writing an invalid into a disk, the
// region has been cleared (including this token)
}
} else {
owner.serverInvalidate(event);
if (owner.concurrencyChecksEnabled && event.noVersionReceivedFromServer()) {
// entry here
return false;
}
final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
// added for cq which needs old value. rdubey
FilterProfile fp = owner.getFilterProfile();
if (!oldRe.isRemoved() && (fp != null && fp.getCqCount() > 0)) {
// OFFHEAP EntryEventImpl
Object oldValue = oldRe.getValueInVM(owner);
// this will not fault in the value.
if (oldValue == Token.NOT_AVAILABLE) {
event.setOldValue(oldRe.getValueOnDiskOrBuffer(owner));
} else {
event.setOldValue(oldValue);
}
}
boolean isCreate = false;
try {
if (oldRe.isRemoved()) {
processVersionTag(oldRe, event);
event.putNewEntry(owner, oldRe);
EntryLogger.logInvalidate(event);
owner.recordEvent(event);
if (!oldRe.isTombstone()) {
owner.updateSizeOnPut(event.getKey(), oldSize, event.getNewValueBucketSize());
} else {
owner.updateSizeOnCreate(event.getKey(), event.getNewValueBucketSize());
isCreate = true;
}
} else {
processVersionTag(oldRe, event);
event.putExistingEntry(owner, oldRe);
EntryLogger.logInvalidate(event);
owner.recordEvent(event);
owner.updateSizeOnPut(event.getKey(), oldSize, event.getNewValueBucketSize());
}
} catch (RegionClearedException e) {
// generate versionTag for the event
EntryLogger.logInvalidate(event);
owner.recordEvent(event);
clearOccured = true;
}
owner.basicInvalidatePart2(oldRe, event, clearOccured, /* conflict with clear */
invokeCallbacks);
if (!clearOccured) {
if (isCreate) {
lruEntryCreate(oldRe);
} else {
lruEntryUpdate(oldRe);
}
}
didInvalidate = true;
invalidatedRe = oldRe;
}
}
}
// synchronized oldRe
}
if (!opCompleted) {
// No pre-existing entry: invalidate via the freshly created entry.
if (forceNewEntry && event.isFromServer()) {
// CCU invalidations before 7.0, and listeners don't care
if (!FORCE_INVALIDATE_EVENT) {
event.inhibitCacheListenerNotification(true);
}
}
event.setRegionEntry(newRe);
owner.serverInvalidate(event);
if (!forceNewEntry && event.noVersionReceivedFromServer()) {
// entry here
return false;
}
try {
ownerIsInitialized = owner.isInitialized();
if (!ownerIsInitialized && owner.getDataPolicy().withReplication()) {
final int oldSize = owner.calculateRegionEntryValueSize(newRe);
invalidateEntry(event, newRe, oldSize);
} else {
invalidateNewEntry(event, owner, newRe);
}
} catch (RegionClearedException e) {
// TODO: deltaGII: do we even need RegionClearedException?
// generate versionTag for the event
owner.recordEvent(event);
clearOccured = true;
}
owner.basicInvalidatePart2(newRe, event, clearOccured, /* conflict with clear */
invokeCallbacks);
if (!clearOccured) {
lruEntryCreate(newRe);
incEntryCount(1);
}
opCompleted = true;
didInvalidate = true;
invalidatedRe = newRe;
// for this invalidate
if (!forceNewEntry) {
removeEntry(event.getKey(), newRe, false);
}
}
// !opCompleted
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
} finally {
// If we never completed the op, our placeholder entry must not be left in the map.
if (!opCompleted) {
removeEntry(event.getKey(), newRe, false);
}
}
}
// synchronized newRe
} else // forceNewEntry
{
// !forceNewEntry
// Path 2: invalidate an existing entry in place, retrying if a tombstone's
// state changes out from under us.
boolean retry = true;
while (retry) {
retry = false;
boolean entryExisted = false;
RegionEntry re = getEntry(event.getKey());
RegionEntry tombstone = null;
boolean haveTombstone = false;
if (re != null && re.isTombstone()) {
tombstone = re;
haveTombstone = true;
re = null;
}
if (re == null) {
ownerIsInitialized = owner.isInitialized();
if (!ownerIsInitialized) {
// when GII message arrived or processed later than invalidate
// message, the entry should be created as placeholder
RegionEntry newRe = haveTombstone ? tombstone : getEntryFactory().createEntry(owner, event.getKey(), Token.INVALID);
synchronized (newRe) {
if (haveTombstone && !tombstone.isTombstone()) {
// state of the tombstone has changed so we need to retry
retry = true;
// retryEntry = tombstone; // leave this in place for debugging
continue;
}
re = putEntryIfAbsent(event.getKey(), newRe);
if (re == tombstone) {
// pretend we don't have an entry
re = null;
}
}
} else if (owner.getServerProxy() != null) {
Object sync = haveTombstone ? tombstone : new Object();
synchronized (sync) {
if (haveTombstone && !tombstone.isTombstone()) {
// bug 45295: state of the tombstone has changed so we need to retry
retry = true;
// retryEntry = tombstone; // leave this in place for debugging
continue;
}
// bug #43287 - send event to server even if it's not in the client (LRU may
// have evicted it)
owner.serverInvalidate(event);
if (owner.concurrencyChecksEnabled) {
if (event.getVersionTag() == null) {
// entry here
return false;
} else if (tombstone != null) {
processVersionTag(tombstone, event);
try {
if (!tombstone.isTombstone()) {
if (isDebugEnabled) {
logger.debug("tombstone is no longer a tombstone. {}:event={}", tombstone, event);
}
}
tombstone.setValue(owner, Token.TOMBSTONE);
} catch (RegionClearedException e) {
// that's okay - when writing a tombstone into a disk, the
// region has been cleared (including this tombstone)
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
// update the tombstone's version to prevent an older CCU/putAll from
// overwriting it
owner.rescheduleTombstone(tombstone, event.getVersionTag());
}
}
}
entryExisted = true;
}
}
if (re != null) {
// normal invalidate operation
synchronized (re) {
if (!event.isOriginRemote() && event.getOperation().isExpiration()) {
// used by a tx.
if (re.isInUseByTransaction()) {
return false;
}
}
if (re.isTombstone() || (!re.isRemoved() && !re.isDestroyed())) {
entryExisted = true;
if (re.isInvalid()) {
// stat
if (isDebugEnabled) {
logger.debug("Invalidate: Entry already invalid: '{}'", event.getKey());
}
if (event.getVersionTag() != null && owner.getVersionVector() != null) {
owner.getVersionVector().recordVersion((InternalDistributedMember) event.getDistributedMember(), event.getVersionTag());
}
} else {
// previous value not invalid
event.setRegionEntry(re);
owner.serverInvalidate(event);
if (owner.concurrencyChecksEnabled && event.noVersionReceivedFromServer()) {
// entry here
if (isDebugEnabled) {
logger.debug("returning early because server did not generate a version stamp for this event:{}", event);
}
return false;
}
// in case of overflow to disk we need the old value for cqs.
if (owner.getFilterProfile().getCqCount() > 0) {
// use to be getValue and can cause dead lock rdubey.
if (re.isValueNull()) {
event.setOldValue(re.getValueOnDiskOrBuffer(owner));
} else {
Object v = re.getValueInVM(owner);
// OFFHEAP escapes to EntryEventImpl oldValue
event.setOldValue(v);
}
}
final boolean oldWasTombstone = re.isTombstone();
final int oldSize = _getOwner().calculateRegionEntryValueSize(re);
try {
invalidateEntry(event, re, oldSize);
} catch (RegionClearedException rce) {
// generate versionTag for the event
EntryLogger.logInvalidate(event);
_getOwner().recordEvent(event);
clearOccured = true;
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
owner.basicInvalidatePart2(re, event, clearOccured, /* conflict with clear */
invokeCallbacks);
if (!clearOccured) {
if (oldWasTombstone) {
lruEntryCreate(re);
} else {
lruEntryUpdate(re);
}
}
didInvalidate = true;
invalidatedRe = re;
}
// previous value not invalid
}
}
// synchronized re
} else // re != null
{
// At this point, either it's not in GII mode, or the placeholder
// is in region, do nothing
}
if (!entryExisted) {
owner.checkEntryNotFound(event.getKey());
}
}
// while(retry)
}
// !forceNewEntry
} catch (DiskAccessException dae) {
// Disk failure: report nothing invalidated and mark the region's disk problem
// before rethrowing.
invalidatedRe = null;
didInvalidate = false;
this._getOwner().handleDiskAccessException(dae);
throw dae;
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
if (invalidatedRe != null) {
owner.basicInvalidatePart3(invalidatedRe, event, invokeCallbacks);
}
if (didInvalidate && !clearOccured) {
try {
lruUpdateCallback();
} catch (DiskAccessException dae) {
this._getOwner().handleDiskAccessException(dae);
throw dae;
}
} else if (!didInvalidate) {
resetThreadLocals();
}
}
return didInvalidate;
} finally {
if (ownerIsInitialized) {
forceInvalidateEvent(event, owner);
}
}
} finally {
releaseCacheModificationLock(owner, event);
}
}
End of aggregated DiskAccessException usage examples.