Usage of org.apache.geode.cache.DiskAccessException in the Apache Geode project:
class DiskInitFile, method recover().
/**
 * Recovers this disk store's state from the init file ({@code this.ifFile}).
 *
 * <p>If the init file does not exist, a brand-new {@link DiskStoreID} is minted.
 * Otherwise the file is parsed record-by-record; the parse side-effects populate
 * this object (via the {@code DiskInitFileParser} callback interface) and each
 * recovered region found in {@code drMap} is prepared for early data recovery.
 *
 * @return the recovered (or newly created) disk store identity, or {@code null}
 *         if parsing was aborted by one of the tolerated exceptions below
 */
DiskStoreID recover() {
  recoverFromFailedCompaction();
  if (!this.ifFile.exists()) {
    // No init file on disk: this is a fresh disk store, so mint a new identity.
    // NOTE(review): UUID.randomUUID() is SecureRandom-backed, not the "cheaper
    // Random class" the original comment claimed — confirm which was intended.
    return new DiskStoreID(UUID.randomUUID());
  }
  DiskStoreID result = null;
  try {
    FileInputStream fis = null;
    CountingDataInputStream dis = null;
    try {
      fis = new FileInputStream(this.ifFile);
      // 8 KiB buffered reads; the counting wrapper tracks the byte offset so we
      // know where to resume appending (nextSeekPosition) after recovery.
      dis = new CountingDataInputStream(new BufferedInputStream(fis, 8 * 1024), this.ifFile.length());
      DiskInitFileParser parser = new DiskInitFileParser(dis, this);
      result = parser.parse();
      this.gotEOF = parser.gotEOF();
      this.nextSeekPosition = dis.getCount();
      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
        logger.trace(LogMarker.PERSIST_RECOVERY, "liveRecordCount={} totalRecordCount={}", this.ifLiveRecordCount, this.ifTotalRecordCount);
      }
    } finally {
      // Close both streams explicitly; if dis was never constructed, fis still
      // needs closing on its own.
      if (dis != null) {
        dis.close();
      }
      if (fis != null) {
        fis.close();
      }
    }
    // Prepare each region recorded in the init file for early recovery.
    for (PlaceHolderDiskRegion drv : this.drMap.values()) {
      if (drv.getMyPersistentID() != null || drv.getMyInitializingID() != null) {
        // Prepare each region we found in the init file for early recovery.
        if (drv.isBucket() || !getDiskStore().getOwnedByRegion()) {
          if (drv.isBucket() && !drv.getActualLruAlgorithm().isNone()) {
            // Buckets with an eviction algorithm need PR-level LRU statistics.
            drv.prlruStats = getDiskStore().getOrCreatePRLRUStats(drv);
          }
          getDiskStore().getStats().incUncreatedRecoveredRegions(1);
          drv.setRecoveredEntryMap(RegionMapFactory.createVM(drv, getDiskStore(), getDiskStore().getInternalRegionArguments()));
          if (!getDiskStore().isOffline()) {
            // schedule it for recovery since we want to recovery region data early now
            getDiskStore().scheduleForRecovery(drv);
          }
          // else if we are validating or offlineCompacting
          // then the scheduleForRecovery is called later in DiskStoreImpl
          // this helps fix bug 42043
        }
      }
    }
  } catch (EOFException ex) {
    // ignore since a partial record write can be caused by a crash
    // throw new
    // DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
    // .toLocalizedString(this.ifFile.getPath()), ex, this.parent);
  } catch (ClassNotFoundException ex) {
    throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0.toLocalizedString(this.ifFile.getPath()), ex, this.parent);
  } catch (IOException ex) {
    throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0.toLocalizedString(this.ifFile.getPath()), ex, this.parent);
  } catch (CancelException ignore) {
    // Cache shutdown raced with recovery; not an error.
    if (logger.isDebugEnabled()) {
      logger.debug("Oplog::readOplog:Error in recovery as Cache was closed", ignore);
    }
  } catch (RegionDestroyedException ignore) {
    // The region vanished while we were recovering it; not an error.
    if (logger.isDebugEnabled()) {
      logger.debug("Oplog::readOplog:Error in recovery as Region was destroyed", ignore);
    }
  } catch (IllegalStateException ex) {
    // Tolerated only when this disk store is itself shutting down.
    if (!this.parent.isClosing()) {
      throw ex;
    }
  }
  return result;
}
Usage of org.apache.geode.cache.DiskAccessException in the Apache Geode project:
class DiskStoreImpl, method handleDiskAccessException().
/**
 * Reacts to a fatal {@link DiskAccessException} by recording it and closing the
 * cache on a dedicated background thread.
 *
 * <p>Exceptions caused by a {@code RegionDestroyedException} are ignored: they
 * affect only the destroyed region, and the disk store must keep serving the
 * other regions. Only the first DAE wins the CAS on {@code diskException};
 * later callers return immediately because cleanup is already under way.
 *
 * @param dae the disk failure being handled; becomes the cache-close reason
 */
void handleDiskAccessException(final DiskAccessException dae) {
  boolean causedByRDE = LocalRegion.causedByRDE(dae);
  // I think they need to keep working (for other regions) in this case.
  if (causedByRDE) {
    return;
  }
  // If another thread has already hit a DAE and is cleaning up, do nothing
  if (!diskException.compareAndSet(null, dae)) {
    return;
  }
  // log the error
  final StringId sid = LocalizedStrings.LocalRegion_A_DISKACCESSEXCEPTION_HAS_OCCURRED_WHILE_WRITING_TO_THE_DISK_FOR_DISKSTORE_0_THE_CACHE_WILL_BE_CLOSED;
  logger.error(LocalizedMessage.create(sid, DiskStoreImpl.this.getName()), dae);
  // Close the cache on its own thread so the (possibly I/O-stuck) caller is not
  // blocked by the potentially slow shutdown.
  final ThreadGroup exceptionHandlingGroup = LoggingThreadGroup.createThreadGroup("Disk Store Exception Handling Group", logger);
  Thread thread = new Thread(exceptionHandlingGroup, "Disk store exception handler") {
    @Override
    public void run() {
      try {
        // now close the cache
        getCache().close(sid.toLocalizedString(DiskStoreImpl.this.getName(), dae), dae);
        _testHandleDiskAccessException.countDown();
      } catch (Exception e) {
        logger.error(LocalizedMessage.create(LocalizedStrings.LocalRegion_AN_EXCEPTION_OCCURRED_WHILE_CLOSING_THE_CACHE), e);
      }
    }
  };
  thread.start();
}
Usage of org.apache.geode.cache.DiskAccessException in the Apache Geode project:
class DiskStoreImpl, method initializeOwner().
/**
 * Initializes the contents of any regions on this DiskStore that have been registered but are not
 * yet initialized.
 *
 * <p>If the region's metadata was already recovered (its {@code DiskRegionView}
 * carries a recovered entry map), the recovered map is handed over to the real
 * region in-place; otherwise the region is scheduled for recovery and recovery
 * of all ready regions is driven synchronously.
 *
 * @param lr the now-created real region taking ownership of recovered state
 */
void initializeOwner(LocalRegion lr) {
  DiskRegion dr = lr.getDiskRegion();
  // We don't need to do recovery for overflow regions.
  if (!lr.getDataPolicy().withPersistence() || !dr.isRecreated()) {
    return;
  }
  // Hold the currentAsyncValueRecoveryMap monitor so async value recovery cannot
  // run while we are copying the entry map.
  synchronized (currentAsyncValueRecoveryMap) {
    DiskRegionView drv = lr.getDiskRegionView();
    if (drv.getRecoveredEntryMap() != null) {
      PersistentOplogSet oplogSet = getPersistentOplogSet(drv);
      // acquire CompactorWriteLock only if the region attributes for the
      // real region are different from the place holder region's
      boolean releaseCompactorWriteLock = false;
      if (drv.isEntriesMapIncompatible()) {
        // fix bug #51097 to prevent concurrent compaction
        acquireCompactorWriteLock();
        releaseCompactorWriteLock = true;
      }
      try {
        drv.copyExistingRegionMap(lr);
        getStats().incUncreatedRecoveredRegions(-1);
        // Point every oplog at the real region's view instead of the placeholder's.
        for (Oplog oplog : oplogSet.getAllOplogs()) {
          if (oplog != null) {
            oplog.updateDiskRegion(lr.getDiskRegionView());
          }
        }
      } finally {
        if (releaseCompactorWriteLock) {
          releaseCompactorWriteLock();
        }
      }
      // If async value recovery is still tracking this region id, retarget it
      // at the real region so values land in the right map.
      if (currentAsyncValueRecoveryMap.containsKey(drv.getId())) {
        currentAsyncValueRecoveryMap.put(drv.getId(), lr);
      }
      return;
    }
  }
  scheduleForRecovery(lr);
  try {
    // acquireReadLock(dr);
    // gotLock = true;
    recoverRegionsThatAreReady();
  } catch (DiskAccessException dae) {
    // Already the right exception type for callers; rethrow unchanged.
    throw dae;
  } catch (RuntimeException re) {
    // Wrap anything else so callers see a disk-store failure originating
    // here
    throw new DiskAccessException("RuntimeException in initializing the disk store from the disk", re, this);
  }
  // finally {
  // if(gotLock) {
  // releaseReadLock(dr);
  // }
  // }
}
Usage of org.apache.geode.cache.DiskAccessException in the Apache Geode project:
class DiskInitFile, method writeIFRecord(byte, long, String).
/**
 * Appends a (record-type, region-id, file-name) record to the init file.
 *
 * <p>Must be called with {@code lock} held by the current thread.
 *
 * @param b the record-type byte
 * @param regionId id of the disk region the record applies to
 * @param fileName file name payload written as modified-UTF
 * @throws DiskAccessException if the underlying write fails; the disk store's
 *         exception handler is invoked first unless a compaction is in progress
 */
private void writeIFRecord(byte b, long regionId, String fileName) {
  assert lock.isHeldByCurrentThread();
  try {
    // Size estimate: type byte + max region-id encoding + UTF payload + EOR byte;
    // clamp to a 32-byte minimum buffer.
    int hdosSize = 1 + DR_ID_MAX_BYTES + estimateByteSize(fileName) + 1;
    if (hdosSize < 32) {
      hdosSize = 32;
    }
    HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize, Version.CURRENT);
    hdos.write(b);
    writeDiskRegionID(hdos, regionId);
    hdos.writeUTF(fileName);
    hdos.write(END_OF_RECORD_ID);
    writeIFRecord(hdos, true);
  } catch (IOException ex) {
    // Attach ex as the cause (previously only its message was embedded), so
    // callers and logs retain the full underlying stack trace.
    DiskAccessException dae = new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), ex,
        this.parent);
    if (!this.compactInProgress) {
      this.parent.handleDiskAccessException(dae);
    }
    throw dae;
  }
}
Usage of org.apache.geode.cache.DiskAccessException in the Apache Geode project:
class DiskInitFile, method openRAF().
/**
 * Opens the init file as a {@link RandomAccessFile} and positions the file
 * pointer at end-of-file so subsequent records are appended.
 *
 * @throws DiskAccessException if the file cannot be opened
 */
private void openRAF() {
  if (DiskStoreImpl.PREALLOCATE_IF) {
    // Preallocated init files are opened through their own dedicated path.
    openRAF2();
    return;
  }
  try {
    this.ifRAF = new RandomAccessFile(this.ifFile, getFileMode());
    final long existingLength = this.ifRAF.length();
    if (existingLength != 0) {
      // Existing records on disk: seek past them so new writes append.
      this.ifRAF.seek(existingLength);
    }
  } catch (IOException ex) {
    throw new DiskAccessException(
        LocalizedStrings.DiskRegion_COULD_NOT_OPEN_0.toLocalizedString(this.ifFile.getPath()), ex,
        this.parent);
  }
}
Aggregations