
Example 51 with DiskAccessException

Use of org.apache.geode.cache.DiskAccessException in project geode by apache.

From class DiskInitFile, method writeIFRecord.

private void writeIFRecord(byte b, long regionId, String fileName, Object compactorInfo) {
    assert lock.isHeldByCurrentThread();
    try {
        int hdosSize = 1 + DR_ID_MAX_BYTES + estimateByteSize(fileName) + 1;
        if (hdosSize < 32) {
            hdosSize = 32;
        }
        HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize, Version.CURRENT);
        hdos.write(b);
        writeDiskRegionID(hdos, regionId);
        hdos.writeUTF(fileName);
        // TODO - plumb the correct compactor info to this point, to optimize
        // serialization
        DataSerializer.writeObject(compactorInfo, hdos);
        hdos.write(END_OF_RECORD_ID);
        writeIFRecord(hdos, true);
    } catch (IOException ex) {
        DiskAccessException dae = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
        if (!this.compactInProgress) {
            this.parent.handleDiskAccessException(dae);
        }
        throw dae;
    }
}
Also used: HeapDataOutputStream (org.apache.geode.internal.HeapDataOutputStream), DiskAccessException (org.apache.geode.cache.DiskAccessException), IOException (java.io.IOException)
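The try/catch tail of writeIFRecord is the idiom that recurs through these examples: serialize with checked IOException, then rethrow as the unchecked DiskAccessException so disk failures propagate past callers that cannot declare IOException. A minimal sketch of that translation, assuming the public DiskAccessException(String, Throwable) constructor; the record type byte and layout here are hypothetical, not Geode's real init-file format:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.geode.cache.DiskAccessException;

public class RecordWriterSketch {
    // Hypothetical record type byte; not a real Geode opcode.
    private static final byte IF_REC = 0x01;

    // Serializes a simple record and converts IOException to the unchecked
    // DiskAccessException, mirroring writeIFRecord's error translation.
    static byte[] encodeRecord(long regionId, String fileName) {
        ByteArrayOutputStream bos = new ByteArrayOutputStream(32);
        try (DataOutputStream dos = new DataOutputStream(bos)) {
            dos.writeByte(IF_REC);
            dos.writeLong(regionId);
            dos.writeUTF(fileName);
        } catch (IOException ex) {
            // Callers see an unchecked disk exception instead of IOException.
            throw new DiskAccessException("failed init file write: " + ex, ex);
        }
        return bos.toByteArray();
    }
}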

Example 52 with DiskAccessException

Use of org.apache.geode.cache.DiskAccessException in project geode by apache.

From class DiskInitFile, method compact.

private void compact() {
    lock.lock(false);
    this.compactInProgress = true;
    try {
        try {
            this.ifRAF.close();
        } catch (IOException ignore) {
        }
        // rename the old file to tmpFile
        File tmpFile = getTempFile();
        if (this.ifFile.renameTo(tmpFile)) {
            boolean success = false;
            try {
                // create the new file
                openRAF();
                // fill the new file with data
                writeLiveData();
                success = true;
                // delete the old file
                if (!tmpFile.delete()) {
                    throw new DiskAccessException("could not delete temporary file " + tmpFile, this.parent);
                }
            } catch (DiskAccessException ignore) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Exception compacting init file {}", this, ignore);
                }
            } finally {
                if (!success) {
                    // close the new one and delete it
                    try {
                        this.ifRAF.close();
                    } catch (IOException ignore2) {
                    }
                    if (!this.ifFile.delete()) {
                        throw new DiskAccessException("could not delete file " + this.ifFile, this.parent);
                    }
                    if (!tmpFile.renameTo(this.ifFile)) {
                        throw new DiskAccessException("could not rename file " + tmpFile + " to " + this.ifFile, this.parent);
                    }
                    // reopen the old file since we couldn't write the new one
                    openRAF();
                    // reset the counts to 0 so we will try a compaction again
                    // in the future but not right away.
                    this.ifLiveRecordCount = 0;
                    this.ifTotalRecordCount = 0;
                }
            }
        } else {
            // reopen the old file since we couldn't rename it
            openRAF();
            // reset the counts to 0 so we will try a compaction again
            // in the future but not right away.
            this.ifLiveRecordCount = 0;
            this.ifTotalRecordCount = 0;
        }
    } finally {
        this.compactInProgress = false;
        lock.unlock();
    }
}
Also used: DiskAccessException (org.apache.geode.cache.DiskAccessException), IOException (java.io.IOException), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File)
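compact() follows a classic safe-rewrite protocol: move the live file aside, write a fresh copy, and only then delete the parked original; any failure rolls back by discarding the partial copy and restoring the original. A condensed sketch of the same protocol using java.nio.file in place of Geode's file wrappers, where Rewriter is a hypothetical callback standing in for writeLiveData and tmp is assumed not to exist yet:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class SafeRewriteSketch {
    interface Rewriter {
        void writeLiveData(Path target) throws IOException;
    }

    // Rewrites 'file' via 'rewriter', keeping the old contents parked in
    // 'tmp' until the new copy is complete, then deleting the temp file.
    static void compact(Path file, Path tmp, Rewriter rewriter) throws IOException {
        Files.move(file, tmp);                 // park the old contents
        boolean success = false;
        try {
            rewriter.writeLiveData(file);      // write the replacement
            success = true;
            Files.delete(tmp);                 // old copy no longer needed
        } finally {
            if (!success) {
                // roll back: drop the partial file, restore the old one
                Files.deleteIfExists(file);
                Files.move(tmp, file);
            }
        }
    }
}

As in the Geode method, a failure to delete the temp file after a successful rewrite still surfaces as an exception, while a failed rewrite leaves the original file back in place.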

Example 53 with DiskAccessException

Use of org.apache.geode.cache.DiskAccessException in project geode by apache.

From class Oplog, method readGemfireVersionRecord.

/**
   * @throws DiskAccessException if this file does not belong to our parent
   */
private void readGemfireVersionRecord(DataInput dis, File f) throws IOException {
    Version recoveredGFVersion = readProductVersionRecord(dis, f);
    final boolean hasDataVersion;
    if ((hasDataVersion = (recoveredGFVersion == Version.TOKEN))) {
        // actual GFE version will be the next record in this case
        byte opCode = dis.readByte();
        if (opCode != OPLOG_GEMFIRE_VERSION) {
            throw new DiskAccessException(LocalizedStrings.Oplog_UNKNOWN_OPCODE_0_FOUND_IN_DISK_OPERATION_LOG.toLocalizedString(opCode), getParent());
        }
        recoveredGFVersion = readProductVersionRecord(dis, f);
    }
    if (this.gfversion == null) {
        this.gfversion = recoveredGFVersion;
    } else {
        assert this.gfversion == recoveredGFVersion;
    }
    if (hasDataVersion) {
        byte opCode = dis.readByte();
        if (opCode != OPLOG_GEMFIRE_VERSION) {
            throw new DiskAccessException(LocalizedStrings.Oplog_UNKNOWN_OPCODE_0_FOUND_IN_DISK_OPERATION_LOG.toLocalizedString(opCode), getParent());
        }
        recoveredGFVersion = readProductVersionRecord(dis, f);
        if (this.dataVersion == null) {
            this.dataVersion = recoveredGFVersion;
        } else {
            assert this.dataVersion == recoveredGFVersion;
        }
    }
}
Also used: Version (org.apache.geode.internal.Version), DiskAccessException (org.apache.geode.cache.DiskAccessException)
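readGemfireVersionRecord guards every version record with an opcode check and fails fast on anything unexpected. A small sketch of that guard, reusing the (String, DiskStore) constructor shape seen in the examples above; the opcode constant here is hypothetical, not Geode's real OPLOG_GEMFIRE_VERSION value:

import java.io.DataInput;
import java.io.IOException;

import org.apache.geode.cache.DiskAccessException;
import org.apache.geode.cache.DiskStore;

public class OpcodeGuardSketch {
    // Hypothetical opcode value, for illustration only.
    private static final byte VERSION_OPCODE = 0x2A;

    // Fails fast, as readGemfireVersionRecord does, when the stream is not
    // positioned at the expected record type.
    static void expectVersionRecord(DataInput in, DiskStore store) throws IOException {
        byte opCode = in.readByte();
        if (opCode != VERSION_OPCODE) {
            throw new DiskAccessException(
                "unknown opcode " + opCode + " found in disk operation log", store);
        }
    }
}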

Example 54 with DiskAccessException

Use of org.apache.geode.cache.DiskAccessException in project geode by apache.

From class Oplog, method initAfterRecovery.

void initAfterRecovery(boolean offline) {
    this.isRecovering = false;
    this.closed = false;
    this.deleted.set(false);
    String n = getParent().getName();
    // crf might not exist; but drf always will
    this.diskFile = new File(this.drf.f.getParentFile(), oplogSet.getPrefix() + n + "_" + this.oplogId);
    try {
        // This is a recovered oplog and we only read from its crf.
        // No need to open the drf.
        this.doneAppending = true;
        if (this.crf.f != null && !hasNoLiveValues()) {
            this.closed = false;
            // truncate crf/drf if their actual size is less than their pre-blow
            // size
            this.crf.raf = new UninterruptibleRandomAccessFile(this.crf.f, "rw");
            this.crf.RAFClosed = false;
            this.crf.channel = this.crf.raf.getChannel();
            unpreblow(this.crf, getMaxCrfSize());
            this.crf.raf.close();
            // make crf read only
            this.crf.raf = new UninterruptibleRandomAccessFile(this.crf.f, "r");
            this.crf.channel = this.crf.raf.getChannel();
            this.stats.incOpenOplogs();
            // existing behavior
            try {
                this.drf.raf = new UninterruptibleRandomAccessFile(this.drf.f, "rw");
                this.drf.RAFClosed = false;
                this.drf.channel = this.drf.raf.getChannel();
                unpreblow(this.drf, getMaxDrfSize());
            } finally {
                this.drf.raf.close();
                this.drf.raf = null;
                this.drf.RAFClosed = true;
            }
        // no need to seek to the end; we will not be writing to a recovered
        // oplog; only reading
        // this.crf.raf.seek(this.crf.currSize);
        } else if (!offline) {
            // drf exists but crf has been deleted (because it was empty).
            // I don't think the drf needs to be opened. It is only used during
            // recovery.
            // At some point the compactor may identify that it can be deleted.
            this.crf.RAFClosed = true;
            deleteCRF();
            this.closed = true;
            this.deleted.set(true);
        }
        // since we never open it on a recovered oplog
        this.drf.RAFClosed = true;
    } catch (IOException ex) {
        getParent().getCancelCriterion().checkCancelInProgress(ex);
        throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_CREATING_OPERATION_LOG_BECAUSE_0.toLocalizedString(ex), getParent());
    }
    if (hasNoLiveValues() && !offline) {
        getOplogSet().removeOplog(getOplogId(), true, getHasDeletes() ? this : null);
        if (!getHasDeletes()) {
            getOplogSet().drfDelete(this.oplogId);
            deleteFile(this.drf);
        }
    } else if (needsCompaction()) {
    // just leave it in the list it is already in
    } else {
        // remove it from the compactable list
        getOplogSet().removeOplog(getOplogId(), true, /*
               * say we are deleting so that undeletedOplogSize is not inced
               */
        null);
        // add it to the inactive list
        getOplogSet().addInactive(this);
    }
}
Also used: UninterruptibleRandomAccessFile (org.apache.geode.internal.cache.persistence.UninterruptibleRandomAccessFile), DiskAccessException (org.apache.geode.cache.DiskAccessException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), File (java.io.File)
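One detail worth pulling out of initAfterRecovery: the recovered crf is opened read-write only long enough to trim pre-allocated ("pre-blown") space, then reopened read-only, because recovered oplogs are read but never appended to. A sketch of that two-step reopen with plain java.io.RandomAccessFile (Geode uses its UninterruptibleRandomAccessFile wrapper, and unpreblow is approximated here by setLength):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class ReopenReadOnlySketch {
    // Trims the file to 'actualSize' if it was pre-grown beyond it, then
    // reopens it read-only, mirroring the unpreblow-then-reopen sequence.
    static RandomAccessFile reopenReadOnly(File f, long actualSize) throws IOException {
        try (RandomAccessFile rw = new RandomAccessFile(f, "rw")) {
            if (rw.length() > actualSize) {
                rw.setLength(actualSize); // drop the unused pre-blow tail
            }
        }
        // Recovered logs are read, never appended, so "r" is sufficient.
        return new RandomAccessFile(f, "r");
    }
}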

Example 55 with DiskAccessException

Use of org.apache.geode.cache.DiskAccessException in project geode by apache.

From class Oplog, method readCrf.

/**
   * Returns the number of bytes read.
   */
private long readCrf(OplogEntryIdSet deletedIds, boolean recoverValues, boolean latestOplog) {
    this.recoverNewEntryId = DiskStoreImpl.INVALID_ID;
    this.recoverModEntryId = DiskStoreImpl.INVALID_ID;
    this.recoverModEntryIdHWM = DiskStoreImpl.INVALID_ID;
    boolean readLastRecord = true;
    CountingDataInputStream dis = null;
    try {
        final LocalRegion currentRegion = LocalRegion.getInitializingRegion();
        final Version version = getProductVersionIfOld();
        final ByteArrayDataInput in = new ByteArrayDataInput();
        final HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
        int recordCount = 0;
        boolean foundDiskStoreRecord = false;
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(this.crf.f);
            dis = new CountingDataInputStream(new BufferedInputStream(fis, 1024 * 1024), this.crf.f.length());
            boolean endOfLog = false;
            while (!endOfLog) {
                // long startPosition = byteCount;
                if (dis.atEndOfFile()) {
                    endOfLog = true;
                    break;
                }
                readLastRecord = false;
                byte opCode = dis.readByte();
                if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                    logger.trace(LogMarker.PERSIST_RECOVERY, "Oplog opCode={}", opCode);
                }
                switch(opCode) {
                    case OPLOG_EOF_ID:
                        // we are at the end of the oplog. So we need to back up one byte
                        dis.decrementCount();
                        endOfLog = true;
                        break;
                    case OPLOG_CONFLICT_VERSION:
                        this.readVersionTagOnlyEntry(dis, opCode);
                        break;
                    case OPLOG_NEW_ENTRY_BASE_ID:
                        {
                            long newEntryBase = dis.readLong();
                            if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                                logger.trace(LogMarker.PERSIST_RECOVERY, "newEntryBase={}", newEntryBase);
                            }
                            readEndOfRecord(dis);
                            setRecoverNewEntryId(newEntryBase);
                            recordCount++;
                        }
                        break;
                    case OPLOG_NEW_ENTRY_0ID:
                        readNewEntry(dis, opCode, deletedIds, recoverValues, currentRegion, version, in, hdos);
                        recordCount++;
                        break;
                    case OPLOG_MOD_ENTRY_1ID:
                    case OPLOG_MOD_ENTRY_2ID:
                    case OPLOG_MOD_ENTRY_3ID:
                    case OPLOG_MOD_ENTRY_4ID:
                    case OPLOG_MOD_ENTRY_5ID:
                    case OPLOG_MOD_ENTRY_6ID:
                    case OPLOG_MOD_ENTRY_7ID:
                    case OPLOG_MOD_ENTRY_8ID:
                        readModifyEntry(dis, opCode, deletedIds, recoverValues, currentRegion, version, in, hdos);
                        recordCount++;
                        break;
                    case OPLOG_MOD_ENTRY_WITH_KEY_1ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_2ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_3ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_4ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_5ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_6ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_7ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_8ID:
                        readModifyEntryWithKey(dis, opCode, deletedIds, recoverValues, currentRegion, version, in, hdos);
                        recordCount++;
                        break;
                    case OPLOG_DISK_STORE_ID:
                        readDiskStoreRecord(dis, this.crf.f);
                        foundDiskStoreRecord = true;
                        recordCount++;
                        break;
                    case OPLOG_MAGIC_SEQ_ID:
                        readOplogMagicSeqRecord(dis, this.crf.f, OPLOG_TYPE.CRF);
                        break;
                    case OPLOG_GEMFIRE_VERSION:
                        readGemfireVersionRecord(dis, this.crf.f);
                        recordCount++;
                        break;
                    case OPLOG_RVV:
                        readRVVRecord(dis, this.drf.f, false, latestOplog);
                        recordCount++;
                        break;
                    default:
                        throw new DiskAccessException(LocalizedStrings.Oplog_UNKNOWN_OPCODE_0_FOUND_IN_DISK_OPERATION_LOG.toLocalizedString(opCode), getParent());
                }
                readLastRecord = true;
            // @todo
            // if (rgn.isDestroyed()) {
            // break;
            // }
            }
        // while
        } finally {
            if (dis != null) {
                dis.close();
            }
            if (fis != null) {
                fis.close();
            }
        }
        if (!foundDiskStoreRecord && recordCount > 0) {
            throw new DiskAccessException("The oplog file \"" + this.crf.f + "\" does not belong to the init file \"" + getParent().getInitFile() + "\". Crf did not contain a disk store id.", getParent());
        }
    } catch (EOFException ignore) {
    // ignore since a partial record write can be caused by a crash
    } catch (IOException ex) {
        getParent().getCancelCriterion().checkCancelInProgress(ex);
        throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0.toLocalizedString(this.crf.f.getPath()), ex, getParent());
    } catch (CancelException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Oplog::readOplog:Error in recovery as Cache was closed", e);
        }
    } catch (RegionDestroyedException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Oplog::readOplog:Error in recovery as Region was destroyed", e);
        }
    } catch (IllegalStateException e) {
        throw e;
    }
    // Add the Oplog size to the Directory Holder which owns this oplog,
    // so that available space is correctly calculated & stats updated.
    long byteCount = 0;
    if (!readLastRecord) {
        // this means that there was a crash
        // and hence we should not continue to read
        // the next oplog
        this.crashed = true;
        if (dis != null) {
            byteCount = dis.getFileLength();
        }
    } else {
        if (dis != null) {
            byteCount = dis.getCount();
        }
    }
    return byteCount;
}
Also used: RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), ByteArrayDataInput (org.apache.geode.internal.ByteArrayDataInput), FileInputStream (java.io.FileInputStream), Version (org.apache.geode.internal.Version), BufferedInputStream (java.io.BufferedInputStream), HeapDataOutputStream (org.apache.geode.internal.HeapDataOutputStream), DiskAccessException (org.apache.geode.cache.DiskAccessException), EOFException (java.io.EOFException), CancelException (org.apache.geode.CancelException)
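readCrf's main loop is an opcode dispatcher with crash detection: readLastRecord is cleared before each record and set again only after the record parses completely, so an EOFException from a torn final write leaves the flag false and marks the oplog as crashed. A stripped-down sketch of that loop shape, with hypothetical opcodes and a single long payload standing in for the real record bodies:

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

public class OplogLoopSketch {
    // Hypothetical opcode values, for illustration only.
    private static final byte OP_EOF = 0;
    private static final byte OP_ENTRY = 1;

    // Returns true only if every record, including the last, parsed
    // completely; a file that ends mid-record (or, in this simplified
    // sketch, without the EOF marker) reports false, the way readCrf
    // flags a crash.
    static boolean readAll(DataInputStream dis) throws IOException {
        boolean readLastRecord = true;
        try {
            while (true) {
                readLastRecord = false; // cleared before each record
                byte opCode = dis.readByte();
                switch (opCode) {
                    case OP_EOF:
                        return true; // clean end of log
                    case OP_ENTRY:
                        dis.readLong(); // record body
                        break;
                    default:
                        throw new IOException("unknown opcode " + opCode);
                }
                readLastRecord = true; // record parsed completely
            }
        } catch (EOFException ignore) {
            // torn final record, e.g. a crash during the last write
        }
        return readLastRecord;
    }
}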

Aggregations

DiskAccessException (org.apache.geode.cache.DiskAccessException): 76 uses
IOException (java.io.IOException): 44 uses
InterruptedIOException (java.io.InterruptedIOException): 17 uses
StoredObject (org.apache.geode.internal.offheap.StoredObject): 13 uses
HeapDataOutputStream (org.apache.geode.internal.HeapDataOutputStream): 11 uses
ByteBuffer (java.nio.ByteBuffer): 9 uses
Test (org.junit.Test): 8 uses
Version (org.apache.geode.internal.Version): 6 uses
File (java.io.File): 5 uses
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 5 uses
IndexManager (org.apache.geode.cache.query.internal.index.IndexManager): 5 uses
UninterruptibleFileChannel (org.apache.geode.internal.cache.persistence.UninterruptibleFileChannel): 5 uses
VersionTag (org.apache.geode.internal.cache.versions.VersionTag): 5 uses
Released (org.apache.geode.internal.offheap.annotations.Released): 5 uses
BufferedInputStream (java.io.BufferedInputStream): 4 uses
FileInputStream (java.io.FileInputStream): 4 uses
CancelException (org.apache.geode.CancelException): 4 uses
BytesAndBits (org.apache.geode.internal.cache.persistence.BytesAndBits): 4 uses
UninterruptibleRandomAccessFile (org.apache.geode.internal.cache.persistence.UninterruptibleRandomAccessFile): 4 uses
EOFException (java.io.EOFException): 3 uses