
Example 6 with StorageRandomAccessFile

use of org.apache.derby.io.StorageRandomAccessFile in project derby by apache.

the class LogToFile method readControlFile.

/*
		Carefully read the content of the control file.

		<P> MT- read only
	*/
private long readControlFile(StorageFile logControlFileName, Properties startParams) throws IOException, StandardException {
    StorageRandomAccessFile logControlFile = null;
    ByteArrayInputStream bais = null;
    DataInputStream dais = null;
    logControlFile = privRandomAccessFile(logControlFileName, "r");
    boolean upgradeNeeded = false;
    long value = LogCounter.INVALID_LOG_INSTANT;
    long onDiskChecksum = 0;
    long controlFilelength = logControlFile.length();
    byte[] barray = null;
    try {
        // skip reading the checksum; the control file predates version 1.5
        if (controlFilelength < 16)
            onDiskChecksum = -1;
        else if (controlFilelength == 16) {
            barray = new byte[16];
            logControlFile.readFully(barray);
        } else if (controlFilelength > 16) {
            barray = new byte[(int) logControlFile.length() - 8];
            logControlFile.readFully(barray);
            onDiskChecksum = logControlFile.readLong();
            if (onDiskChecksum != 0) {
                checksum.reset();
                checksum.update(barray, 0, barray.length);
            }
        }
        if (onDiskChecksum == checksum.getValue() || onDiskChecksum == 0) {
            bais = new ByteArrayInputStream(barray);
            dais = new DataInputStream(bais);
            if (dais.readInt() != fid) {
                throw StandardException.newException(SQLState.LOG_INCOMPATIBLE_FORMAT, dataDirectory);
            }
            int obsoleteVersion = dais.readInt();
            value = dais.readLong();
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "log control file ckp instance = " + LogCounter.toDebugString(value));
            }
            // from version 1.5 onward, we added an int for storing JBMS
            // version and an int for storing checkpoint interval
            // and log switch interval
            onDiskMajorVersion = dais.readInt();
            onDiskMinorVersion = dais.readInt();
            int dbBuildNumber = dais.readInt();
            int flags = dais.readByte();
            // check if the database was booted previously at any time with
            // derby.system.durability=test mode
            // If yes, then on a boot error we report that this setting is
            // probably the cause for the error and also log a warning
            // in the derby.log that this mode was set previously
            wasDBInDurabilityTestModeNoSync = (flags & IS_DURABILITY_TESTMODE_NO_SYNC_FLAG) != 0;
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "log control file, was derby.system.durability set to test = " + wasDBInDurabilityTestModeNoSync);
            }
            onDiskBeta = (flags & IS_BETA_FLAG) != 0;
            if (onDiskBeta) {
                // version
                if (!jbmsVersion.isBeta() || onDiskMajorVersion != jbmsVersion.getMajorVersion() || onDiskMinorVersion != jbmsVersion.getMinorVersion()) {
                    boolean forceBetaUpgrade = false;
                    if (SanityManager.DEBUG) {
                        // give ourselves an out for this beta check for debugging purposes
                        if (SanityManager.DEBUG_ON("forceBetaUpgrade")) {
                            Monitor.logMessage("WARNING !! : forcing beta upgrade.");
                            forceBetaUpgrade = true;
                        }
                    }
                    if (!forceBetaUpgrade) {
                        throw StandardException.newException(SQLState.LOG_CANNOT_UPGRADE_BETA, dataDirectory, ProductVersionHolder.simpleVersionString(onDiskMajorVersion, onDiskMinorVersion, onDiskBeta));
                    }
                }
            }
            // 
            if (onDiskMajorVersion > jbmsVersion.getMajorVersion() || (onDiskMajorVersion == jbmsVersion.getMajorVersion() && onDiskMinorVersion > jbmsVersion.getMinorVersion())) {
                // upgrade is allowed.
                throw StandardException.newException(SQLState.LOG_INCOMPATIBLE_VERSION, dataDirectory, ProductVersionHolder.simpleVersionString(onDiskMajorVersion, onDiskMinorVersion, onDiskBeta));
            }
            // maintenance (point) versions should not require an upgrade.
            if ((onDiskMajorVersion != jbmsVersion.getMajorVersion()) || (onDiskMinorVersion != jbmsVersion.getMinorVersion())) {
                upgradeNeeded = true;
            }
            // except in case of upgrade from versions <= 3.5
            if (onDiskChecksum == 0 && (!(onDiskMajorVersion <= 3 && onDiskMinorVersion <= 5) || onDiskMajorVersion == 0))
                value = LogCounter.INVALID_LOG_INSTANT;
        }
    } finally {
        if (logControlFile != null)
            logControlFile.close();
        if (bais != null)
            bais.close();
        if (dais != null)
            dais.close();
    }
    if (upgradeNeeded) {
        if (isFullUpgrade(startParams, ProductVersionHolder.simpleVersionString(onDiskMajorVersion, onDiskMinorVersion, onDiskBeta))) {
            onDiskMajorVersion = jbmsVersion.getMajorVersion();
            onDiskMinorVersion = jbmsVersion.getMinorVersion();
            onDiskBeta = jbmsVersion.isBeta();
            if (!writeControlFile(logControlFileName, value)) {
                throw StandardException.newException(SQLState.LOG_CONTROL_FILE, logControlFileName);
            }
        }
    }
    return value;
}
Also used : StorageRandomAccessFile(org.apache.derby.io.StorageRandomAccessFile) ByteArrayInputStream(java.io.ByteArrayInputStream) DataInputStream(java.io.DataInputStream)
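
The core of readControlFile is the trailer check: everything before the final 8 bytes is fed through a CRC32 and compared against the long stored at the end of the file. Below is a minimal, self-contained sketch of that pattern using plain java.util.zip.CRC32 and byte arrays instead of Derby's StorageRandomAccessFile; the class and method names are illustrative only, not Derby APIs.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;

public class ChecksumTrailerDemo {

    // Append a CRC32 of the payload as a trailing long, mirroring how the
    // control file stores its checksum after the fixed-size header.
    static byte[] writeWithChecksum(byte[] payload) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        dos.write(payload);
        CRC32 crc = new CRC32();
        crc.update(payload, 0, payload.length);
        dos.writeLong(crc.getValue());
        dos.flush();
        return bos.toByteArray();
    }

    // Split the image into payload + trailing checksum and verify it, the
    // same shape as readControlFile's "length > 16" branch.
    static boolean verify(byte[] fileImage) throws IOException {
        byte[] payload = new byte[fileImage.length - 8];
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(fileImage));
        dis.readFully(payload);
        long onDiskChecksum = dis.readLong();
        CRC32 crc = new CRC32();
        crc.update(payload, 0, payload.length);
        return onDiskChecksum == crc.getValue();
    }

    public static void main(String[] args) throws IOException {
        byte[] image = writeWithChecksum("control file payload".getBytes("UTF-8"));
        System.out.println("checksum ok: " + verify(image));
        image[0] ^= 0x01; // flip a bit to simulate corruption
        System.out.println("checksum ok after corruption: " + verify(image));
    }
}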

Example 7 with StorageRandomAccessFile

use of org.apache.derby.io.StorageRandomAccessFile in project derby by apache.

the class RAFContainer method run.

// PrivilegedExceptionAction method
public Object run() throws StandardException {
    switch(actionCode) {
        case GET_FILE_NAME_ACTION:
            return privGetFileName(actionIdentity, actionStub, actionErrorOK, actionTryAlternatePath);
        case CREATE_CONTAINER_ACTION:
            {
                StorageFile file = privGetFileName(actionIdentity, false, false, false);
                try {
                    if (file.exists()) {
                        // the container file already exists; cannot create it again
                        throw StandardException.newException(SQLState.FILE_EXISTS, file);
                    }
                } catch (SecurityException se) {
                    throw StandardException.newException(SQLState.FILE_CREATE, se, file);
                }
                try {
                    // OK not to force WAL here, in fact, this operation
                    // precedes the creation of the log record to ensure
                    // sufficient space.
                    dataFactory.writeInProgress();
                    try {
                        fileData = file.getRandomAccessFile("rw");
                        file.limitAccessToOwner();
                    } finally {
                        dataFactory.writeFinished();
                    }
                    // This container format specifies that the first page is
                    // an allocation page and the container information is
                    // stored within it.  The allocation page needs to be
                    // somewhat formatted because if the system crashed after
                    // the create container log operation is written, it needs
                    // to be well formed enough to get the container
                    // information back out of it.
                    // 
                    // Don't try to go thru the page cache here because the
                    // container object cannot be found in the container cache
                    // at this point yet.  However, if we use the page cache
                    // to store the first allocation page, then in order to
                    // write itself out, it needs to ask the container to do
                    // so, which is going to create a deadlock.  The
                    // allocation page cannot write itself out without going
                    // thru the container because it doesn't know where its
                    // offset is.  Here we effectively hardwire page 0 at
                    // offset 0 of the container file to be the first
                    // allocation page.
                    // create an embryonic page - if this is not a temporary
                    // container, synchronously write out the file header.
                    // Need to set canUpdate now; setting it after writeRAFHeader
                    // may be too late in case that method's IO is interrupted
                    // and the container needs reopening. To get the correct
                    // "rw" mode we need canUpdate to be true.
                    canUpdate = true;
                    writeRAFHeader(actionIdentity, fileData, true, (actionIdentity.getSegmentId() != ContainerHandle.TEMPORARY_SEGMENT));
                } catch (IOException ioe) {
                    canUpdate = false;
                    boolean fileDeleted;
                    try {
                        fileDeleted = privRemoveFile(file);
                    } catch (SecurityException se) {
                        throw StandardException.newException(SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, se.toString());
                    }
                    if (!fileDeleted) {
                        throw StandardException.newException(SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, ioe.toString());
                    }
                    throw StandardException.newException(SQLState.FILE_CREATE, ioe, file);
                }
                return null;
            }
        case REMOVE_FILE_ACTION:
            return privRemoveFile(actionFile) ? this : null;
        case OPEN_CONTAINER_ACTION:
            {
                // is this a stub?
                boolean isStub = false;
                StorageFile file = privGetFileName(actionIdentity, false, true, true);
                if (file == null)
                    return null;
                try {
                    if (!file.exists()) {
                        // file does not exist; maybe it has been stubbified
                        file = privGetFileName(actionIdentity, true, true, true);
                        if (!file.exists())
                            return null;
                        isStub = true;
                    }
                } catch (SecurityException se) {
                    throw StandardException.newException(SQLState.DATA_UNEXPECTED_EXCEPTION, se);
                }
                canUpdate = false;
                try {
                    if (!dataFactory.isReadOnly() && file.canWrite())
                        canUpdate = true;
                } catch (SecurityException se) {
                // just means we can't write to it.
                }
                fileName = file.toString();
                try {
                    fileData = file.getRandomAccessFile(canUpdate ? "rw" : "r");
                    readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET));
                    if (SanityManager.DEBUG) {
                        if (isStub)
                            SanityManager.ASSERT(getDroppedState() && getCommittedDropState(), "a stub failed to set drop state");
                    }
                } catch (IOException ioe) {
                    if (isStub) {
                        throw dataFactory.markCorrupt(StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", "read", fileName));
                    }
                    // maybe it is being stubbified... try that
                    StorageFile stub = privGetFileName(actionIdentity, true, true, true);
                    if (stub.exists()) {
                        try {
                            boolean delete_status = privRemoveFile(file);
                            if (SanityManager.DEBUG) {
                                if (!delete_status) {
                                    SanityManager.THROWASSERT("delete of file (" + file + ") failed.");
                                }
                            }
                            fileData = stub.getRandomAccessFile(canUpdate ? "rw" : "r");
                            readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET));
                        } catch (IOException ioe2) {
                            throw dataFactory.markCorrupt(StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe2, getIdentity() != null ? getIdentity().toString() : "unknown", "delete-stub", fileName));
                        }
                    // RESOLVE: this is a temporary hack
                    } else
                        throw dataFactory.markCorrupt(StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", "read", fileName));
                }
                return this;
            }
        // end of case OPEN_CONTAINER_ACTION
        case REOPEN_CONTAINER_ACTION:
            {
                StorageFile file = privGetFileName(actionIdentity, false, true, true);
                synchronized (this) {
                    try {
                        fileData = file.getRandomAccessFile(canUpdate ? "rw" : "r");
                    } catch (FileNotFoundException ioe) {
                        throw dataFactory.markCorrupt(StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe, (getIdentity() != null ? getIdentity().toString() : "unknown"), "read", fileName));
                    }
                }
                return this;
            }
        case STUBBIFY_ACTION:
            {
                StorageFile file = privGetFileName(actionIdentity, false, false, true);
                StorageFile stub = privGetFileName(actionIdentity, true, false, false);
                StorageRandomAccessFile stubData = null;
                try {
                    if (!stub.exists()) {
                        // write the header to the stub
                        stubData = stub.getRandomAccessFile("rw");
                        stub.limitAccessToOwner();
                        writeRAFHeader(actionIdentity, stubData,
                                       true, /* create */
                                       true); /* sync */
                        stubData.close();
                        stubData = null;
                    }
                    // Force WAL and check for database corruption before removing file.
                    // This is one operation where the container is changed on disk
                    // directly without going thru the container cache, which otherwise
                    // would have forced WAL.  Take care of it here.
                    dataFactory.flush(actionInstant);
                    // try to remove the container file
                    // fileData is not null only if we are redoing a removeContainer
                    // (stubbify) operation.  Then fileData actually is opened against
                    // the stub and the original container file does not exist.
                    // Then we need to close it here because this method is called by
                    // cache.remove and nobody will be able to see fileData after this.
                    privRemoveFile(file);
                } catch (SecurityException se) {
                    throw StandardException.newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se.toString());
                } catch (IOException ioe) {
                    // on error, clean up the (half-baked) stub and close any open file handles
                    try {
                        if (stubData != null) {
                            stubData.close();
                            stub.delete();
                            stubData = null;
                        }
                        if (fileData != null) {
                            fileData.close();
                            fileData = null;
                        }
                    } catch (IOException ioe2) {
                        throw StandardException.newException(SQLState.FILE_CANNOT_REMOVE_FILE, ioe2, file, ioe.toString());
                    } catch (SecurityException se) {
                        throw StandardException.newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se.toString());
                    }
                }
                // let the data factory know about the stub file; it can be
                // removed at the next checkpoint if it is not needed for recovery
                dataFactory.stubFileToRemoveAfterCheckPoint(stub, actionInstant, getIdentity());
                return null;
            }
        case GET_RANDOM_ACCESS_FILE_ACTION:
            {
                try {
                    boolean exists = actionFile.exists();
                    Object result = actionFile.getRandomAccessFile("rw");
                    if (!exists) {
                        actionFile.limitAccessToOwner();
                    }
                    return result;
                } catch (IOException ioe) {
                    throw StandardException.newException(SQLState.FILE_CREATE, ioe, actionFile.getPath());
                }
            }
    }
    // end of switch
    return null;
}
Also used : StorageRandomAccessFile(org.apache.derby.io.StorageRandomAccessFile) StorageFile(org.apache.derby.io.StorageFile) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException)
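
RAFContainer routes all of its file-system work through one PrivilegedExceptionAction whose run() dispatches on an actionCode field that is set just before doPrivileged is invoked. The sketch below shows that dispatch shape with plain java.io and hypothetical action constants; it is not Derby's class, and note that the underlying security-manager API is deprecated in recent JDKs.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

// Illustrative only: a single reusable action object that is parameterised
// with an action code before each doPrivileged call, like RAFContainer.run().
class FileActions implements PrivilegedExceptionAction<Object> {

    static final int OPEN_ACTION = 1;
    static final int REMOVE_ACTION = 2;

    private int actionCode;
    private File actionFile;

    Object openPrivileged(File f) throws IOException {
        actionCode = OPEN_ACTION;
        actionFile = f;
        return invoke();
    }

    Object removePrivileged(File f) throws IOException {
        actionCode = REMOVE_ACTION;
        actionFile = f;
        return invoke();
    }

    private Object invoke() throws IOException {
        try {
            return AccessController.doPrivileged(this);
        } catch (PrivilegedActionException pae) {
            throw (IOException) pae.getException();
        } finally {
            actionFile = null; // don't hold on to state between calls
        }
    }

    // All privileged work funnels through one switch, keyed by actionCode.
    public Object run() throws IOException {
        switch (actionCode) {
            case OPEN_ACTION:
                return new RandomAccessFile(actionFile, "rw");
            case REMOVE_ACTION:
                return actionFile.delete() ? this : null;
            default:
                return null;
        }
    }
}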

Example 8 with StorageRandomAccessFile

use of org.apache.derby.io.StorageRandomAccessFile in project derby by apache.

the class RAFContainer method encryptOrDecryptContainer.

/**
 * Creates encrypted or decrypted version of the container.
 *
 * Reads all the pages of the container from the original container
 * through the page cache, then either encrypts page data with the new
 * encryption mechanism or leaves the page data un-encrypted, and finally
 * writes the data to the specified new container file.
 * <p>
 * The encryption and decryption engines used to carry out the
 * cryptographic operation(s) are configured through the raw store, and
 * accessed via the data factory. Note that the pages have already been
 * decrypted before being put into the page cache.
 *
 * @param handle the container handle
 * @param newFilePath file to store the new version of the container in
 * @param doEncrypt tells whether to encrypt or not
 * @exception StandardException Derby Standard error policy
 */
protected void encryptOrDecryptContainer(BaseContainerHandle handle, String newFilePath, boolean doEncrypt) throws StandardException {
    BasePage page = null;
    StorageFile newFile = dataFactory.getStorageFactory().newStorageFile(newFilePath);
    StorageRandomAccessFile newRaf = null;
    try {
        long lastPageNumber = getLastPageNumber(handle);
        newRaf = getRandomAccessFile(newFile);
        byte[] encryptionBuf = null;
        if (doEncrypt) {
            encryptionBuf = new byte[pageSize];
        }
        // copy all pages of this container to the new container file, processing each page on the way.
        for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER; pageNumber <= lastPageNumber; pageNumber++) {
            page = getLatchedPage(handle, pageNumber);
            // Update the page array before writing to the disk.
            // An update consists of adding the container header, or
            // (re-)encrypting the data.
            byte[] dataToWrite = updatePageArray(pageNumber, page.getPageArray(), encryptionBuf, true);
            newRaf.write(dataToWrite, 0, pageSize);
            // unlatch releases page from cache.
            page.unlatch();
            page = null;
        }
        // sync the new version of the container.
        newRaf.sync();
        newRaf.close();
        newRaf = null;
    } catch (IOException ioe) {
        throw StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", doEncrypt ? "encrypt" : "decrypt", newFilePath);
    } finally {
        if (page != null) {
            page.unlatch();
            page = null;
        }
        if (newRaf != null) {
            try {
                newRaf.close();
            } catch (IOException ioe) {
                newRaf = null;
                throw StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", doEncrypt ? "encrypt-close" : "decrypt-close", newFilePath);
            }
        }
    }
}
Also used : StorageRandomAccessFile(org.apache.derby.io.StorageRandomAccessFile) StorageFile(org.apache.derby.io.StorageFile) IOException(java.io.IOException)
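
The method above is essentially a page-sized copy loop: latch a page, transform its byte array, append it to the new file, then sync and close. A rough stand-alone sketch of that loop follows, assuming a fixed 4 KB page size and a placeholder transform() in place of updatePageArray() and the page cache.

import java.io.IOException;
import java.io.RandomAccessFile;

// Illustrative page-copy loop in the same shape as encryptOrDecryptContainer:
// read each fixed-size page, transform it, append it to the new file, then
// sync and close.  transform() stands in for (re-)encryption or header rewriting.
public class ContainerCopy {

    static final int PAGE_SIZE = 4096; // assumed page size

    static byte[] transform(byte[] page) {
        // placeholder for encryption/decryption of the page image
        return page.clone();
    }

    public static void copy(String srcPath, String dstPath) throws IOException {
        RandomAccessFile src = new RandomAccessFile(srcPath, "r");
        RandomAccessFile dst = new RandomAccessFile(dstPath, "rw");
        try {
            long pages = src.length() / PAGE_SIZE;
            byte[] page = new byte[PAGE_SIZE];
            for (long p = 0; p < pages; p++) {
                src.seek(p * PAGE_SIZE);
                src.readFully(page);
                byte[] out = transform(page);
                dst.write(out, 0, PAGE_SIZE);
            }
            dst.getFD().sync(); // durably flush the new file, like newRaf.sync()
        } finally {
            src.close();
            dst.close();
        }
    }
}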

Example 9 with StorageRandomAccessFile

use of org.apache.derby.io.StorageRandomAccessFile in project derby by apache.

the class LogToFile method recover.

/**
 *		Recover the rawStore to a consistent state using the log.
 *
 *		<P>
 *		In this implementation, the log is a stream of log records stored in
 *		one or more flat files.  Recovery is done in 2 passes: redo and undo.
 *		<BR> <B>Redo pass</B>
 *		<BR> In the redo pass, reconstruct the state of the rawstore by
 *		repeating exactly what happened before as recorded in the log.
 *		<BR><B>Undo pass</B>
 *		<BR> In the undo pass, all incomplete transactions are rolled back in
 *		the order from the most recently started to the oldest.
 *
 *		<P>MT - synchronization provided by caller - RawStore boot.
 *		This method is guaranteed to be the only method being called and can
 *		assume single thread access on all fields.
 *
 *		@see Loggable#needsRedo
 *		@see FileLogger#redo
 *
 *		@exception StandardException Standard Derby error policy
 */
public void recover(DataFactory df, TransactionFactory tf) throws StandardException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(df != null, "data factory == null");
    }
    checkCorrupt();
    dataFactory = df;
    // set up the log writer for the first log file (used, among other things,
    // to encrypt checksum log records).
    if (firstLog != null)
        logOut = new LogAccessFile(this, firstLog, logBufferSize);
    // In replication slave mode, wait until the log file needed to boot
    // recovery has been received from the master and may be read.
    if (inReplicationSlaveMode) {
        synchronized (slaveRecoveryMonitor) {
            // recheck the flags each iteration; they may have changed
            // while this thread waited on the monitor
            while (inReplicationSlaveMode && (allowedToReadFileNumber < bootTimeLogFileNumber)) {
                // Wait until the first log file can be read.
                if (replicationSlaveException != null) {
                    throw replicationSlaveException;
                }
                try {
                    slaveRecoveryMonitor.wait();
                } catch (InterruptedException ie) {
                    InterruptStatus.setInterrupted();
                }
            }
        }
    }
    if (recoveryNeeded) {
        try {
            // ///////////////////////////////////////////////////////////
            // 
            // During boot time, the log control file is accessed and
            // bootTimeLogFileNumber is determined.  LogOut is not set up.
            // bootTimeLogFileNumber is the log file the latest checkpoint
            // lives in,
            // or 1.  It may not be the latest log file (the system may have
            // crashed between the time a new log was generated and the
            // checkpoint log written), that can only be determined at the
            // end of recovery redo.
            // 
            // ///////////////////////////////////////////////////////////
            FileLogger logger = (FileLogger) getLogger();
            // ///////////////////////////////////////////////////////////
            if (checkpointInstant != LogCounter.INVALID_LOG_INSTANT) {
                currentCheckpoint = findCheckpoint(checkpointInstant, logger);
            }
            // beginning of the first log file
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(DUMP_LOG_ONLY)) {
                    currentCheckpoint = null;
                    System.out.println("Dump log only");
                    // unless otherwise specified, 1st log file starts at 1
                    String beginLogFileNumber = PropertyUtil.getSystemProperty(DUMP_LOG_FROM_LOG_FILE);
                    if (beginLogFileNumber != null) {
                        bootTimeLogFileNumber = Long.valueOf(beginLogFileNumber).longValue();
                    } else {
                        bootTimeLogFileNumber = 1;
                    }
                }
            }
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON("setCheckpoint")) {
                    currentCheckpoint = null;
                    System.out.println("Set Checkpoint.");
                    // unless otherwise specified, 1st log file starts at 1
                    String checkpointStartLogStr = PropertyUtil.getSystemProperty("derby.storage.checkpointStartLog");
                    String checkpointStartOffsetStr = PropertyUtil.getSystemProperty("derby.storage.checkpointStartOffset");
                    if ((checkpointStartLogStr != null) && (checkpointStartOffsetStr != null)) {
                        checkpointInstant = LogCounter.makeLogInstantAsLong(Long.valueOf(checkpointStartLogStr).longValue(), Long.valueOf(checkpointStartOffsetStr).longValue());
                    } else {
                        SanityManager.THROWASSERT("must set derby.storage.checkpointStartLog and derby.storage.checkpointStartOffset, if setting setCheckpoint.");
                    }
                    currentCheckpoint = findCheckpoint(checkpointInstant, logger);
                }
            }
            long redoLWM = LogCounter.INVALID_LOG_INSTANT;
            long undoLWM = LogCounter.INVALID_LOG_INSTANT;
            long ttabInstant = LogCounter.INVALID_LOG_INSTANT;
            StreamLogScan redoScan = null;
            if (currentCheckpoint != null) {
                Formatable transactionTable = null;
                // RESOLVE: sku
                // currentCheckpoint.getTransactionTable();
                // need to set the transaction table before the undo
                tf.useTransactionTable(transactionTable);
                redoLWM = currentCheckpoint.redoLWM();
                undoLWM = currentCheckpoint.undoLWM();
                if (transactionTable != null)
                    ttabInstant = checkpointInstant;
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(DBG_FLAG)) {
                        SanityManager.DEBUG(DBG_FLAG, "Found checkpoint at " + LogCounter.toDebugString(checkpointInstant) + " " + currentCheckpoint.toString());
                    }
                }
                firstLogFileNumber = LogCounter.getLogFileNumber(redoLWM);
                // figure out where the first interesting log file is.
                if (LogCounter.getLogFileNumber(undoLWM) < firstLogFileNumber) {
                    firstLogFileNumber = LogCounter.getLogFileNumber(undoLWM);
                }
                // if the checkpoint record doesn't have a transaction
                // table, we need to rebuild it by scanning the log from
                // the undoLWM.  If it does have a transaction table, we
                // only need to scan the log from the redoLWM
                redoScan = (StreamLogScan) openForwardsScan(undoLWM, (LogInstant) null);
            } else {
                // no checkpoint
                tf.useTransactionTable((Formatable) null);
                long start = LogCounter.makeLogInstantAsLong(bootTimeLogFileNumber, LOG_FILE_HEADER_SIZE);
                // no checkpoint, start redo from the beginning of the
                // file - assume this is the first log file
                firstLogFileNumber = bootTimeLogFileNumber;
                redoScan = (StreamLogScan) openForwardsScan(start, (LogInstant) null);
            }
            // open a transaction that is used for redo and rollback
            RawTransaction recoveryTransaction = tf.startTransaction(rawStoreFactory, getContextService().getCurrentContextManager(), AccessFactoryGlobals.USER_TRANS_NAME);
            // make this transaction aware that it is a recovery transaction
            // and don't spew forth post commit work while replaying the log
            recoveryTransaction.recoveryTransaction();
            // ///////////////////////////////////////////////////////////
            // 
            // Redo loop - in FileLogger
            // 
            // ///////////////////////////////////////////////////////////
            // 
            // set log factory state to inRedo so that if redo caused any
            // dirty page to be written from the cache, it won't flush the
            // log since the end of the log has not been determined and we
            // know the log record that caused the page to change has
            // already been written to the log.  We need the page write to
            // go thru the log factory because if the redo has a problem,
            // the log factory is corrupt and the only way we know not to
            // write out the page in a checkpoint is if it check with the
            // log factory, and that is done via a flush - we use the WAL
            // protocol to stop corrupt pages from writing to the disk.
            // 
            inRedo = true;
            long logEnd = logger.redo(recoveryTransaction, tf, redoScan, redoLWM, ttabInstant);
            inRedo = false;
            // Replication slave: When recovery has completed the
            // redo pass, the database is no longer in replication
            // slave mode and only the recover thread will access
            // this object until recover has complete. We
            // therefore do not need two versions of the log file
            // number anymore. From this point on, logFileNumber
            // is used for all references to the current log file
            // number; bootTimeLogFileNumber is no longer used.
            logFileNumber = bootTimeLogFileNumber;
            // if we are only dumping the log, don't alter
            // the database and prevent anyone from using the log
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY)) {
                    Monitor.logMessage("_____________________________________________________");
                    Monitor.logMessage("\n\t\t Log dump finished");
                    Monitor.logMessage("_____________________________________________________");
                    // just in case, it has not been set anyway
                    logOut = null;
                    return;
                }
            }
            // ///////////////////////////////////////////////////////////
            // 
            // determine where the log ends
            // 
            // ///////////////////////////////////////////////////////////
            StorageRandomAccessFile theLog = null;
            // if logEnd is invalid, no log record was found; most likely
            // the log is missing or corrupted in some way ...
            if (logEnd == LogCounter.INVALID_LOG_INSTANT) {
                Monitor.logTextMessage(MessageId.LOG_LOG_NOT_FOUND);
                StorageFile logFile = getLogFileName(logFileNumber);
                if (privExists(logFile)) {
                    // delete the leftover log file if possible; otherwise, skip it
                    if (!privDelete(logFile)) {
                        logFile = getLogFileName(++logFileNumber);
                    }
                }
                IOException accessException = null;
                try {
                    theLog = privRandomAccessFile(logFile, "rw");
                } catch (IOException ioe) {
                    theLog = null;
                    accessException = ioe;
                }
                if (theLog == null || !privCanWrite(logFile)) {
                    if (theLog != null)
                        theLog.close();
                    theLog = null;
                    Monitor.logTextMessage(MessageId.LOG_CHANGED_DB_TO_READ_ONLY);
                    if (accessException != null)
                        Monitor.logThrowable(accessException);
                    ReadOnlyDB = true;
                } else {
                    try {
                        // no previous log file or previous log position
                        if (!initLogFile(theLog, logFileNumber, LogCounter.INVALID_LOG_INSTANT)) {
                            throw markCorrupt(StandardException.newException(SQLState.LOG_SEGMENT_NOT_EXIST, logFile.getPath()));
                        }
                    } catch (IOException ioe) {
                        throw markCorrupt(StandardException.newException(SQLState.LOG_IO_ERROR, ioe));
                    }
                    // successfully init'd the log file - set up markers,
                    // and position at the end of the log.
                    setEndPosition(theLog.getFilePointer());
                    lastFlush = endPosition;
                    // if write sync is used, pre-allocate the log file
                    // and reopen the file in "rwd" mode.
                    if (isWriteSynced) {
                        // extend the file by writing zeros to it
                        preAllocateNewLogFile(theLog);
                        theLog.close();
                        theLog = openLogFileInWriteMode(logFile);
                        // position the log at the current end position
                        theLog.seek(endPosition);
                    }
                    if (SanityManager.DEBUG) {
                        SanityManager.ASSERT(endPosition == LOG_FILE_HEADER_SIZE, "empty log file has wrong size");
                    }
                    // because we already increment the log number here,
                    // no special log switch is required for backup recoveries.
                    logSwitchRequired = false;
                }
            } else {
                // logEnd is the instant of the next log record in the log
                // it is used to determine the last known good position of
                // the log
                logFileNumber = LogCounter.getLogFileNumber(logEnd);
                ReadOnlyDB = df.isReadOnly();
                StorageFile logFile = getLogFileName(logFileNumber);
                if (!ReadOnlyDB) {
                    // if datafactory doesn't think it is readonly, we can
                    // do some further tests of our own
                    IOException accessException = null;
                    try {
                        if (isWriteSynced)
                            theLog = openLogFileInWriteMode(logFile);
                        else
                            theLog = privRandomAccessFile(logFile, "rw");
                    } catch (IOException ioe) {
                        theLog = null;
                        accessException = ioe;
                    }
                    if (theLog == null || !privCanWrite(logFile)) {
                        if (theLog != null)
                            theLog.close();
                        theLog = null;
                        Monitor.logTextMessage(MessageId.LOG_CHANGED_DB_TO_READ_ONLY);
                        if (accessException != null)
                            Monitor.logThrowable(accessException);
                        ReadOnlyDB = true;
                    }
                }
                if (!ReadOnlyDB) {
                    setEndPosition(LogCounter.getLogFilePosition(logEnd));
                    // find out if log had incomplete log records at the end.
                    if (redoScan.isLogEndFuzzy()) {
                        theLog.seek(endPosition);
                        long eof = theLog.length();
                        Monitor.logTextMessage(MessageId.LOG_INCOMPLETE_LOG_RECORD, logFile, endPosition, eof);
                        /* Write zeros from incomplete log record to end of file */
                        long nWrites = (eof - endPosition) / logBufferSize;
                        int rBytes = (int) ((eof - endPosition) % logBufferSize);
                        byte[] zeroBuf = new byte[logBufferSize];
                        // write the zeros to file
                        while (nWrites-- > 0) theLog.write(zeroBuf);
                        if (rBytes != 0)
                            theLog.write(zeroBuf, 0, rBytes);
                        if (!isWriteSynced)
                            syncFile(theLog);
                    }
                    if (SanityManager.DEBUG) {
                        if (theLog.length() != endPosition) {
                            SanityManager.ASSERT(theLog.length() > endPosition, "log end > log file length, bad scan");
                        }
                    }
                    // set the log to the true end position,
                    // and not the end of the file
                    lastFlush = endPosition;
                    theLog.seek(endPosition);
                }
            }
            if (theLog != null) {
                if (logOut != null) {
                    // Close the currently open log file, if there is
                    // one. DERBY-5937.
                    logOut.close();
                }
                logOut = new LogAccessFile(this, theLog, logBufferSize);
            }
            if (logSwitchRequired)
                switchLogFile();
            boolean noInFlightTransactions = tf.noActiveUpdateTransaction();
            if (ReadOnlyDB) {
                // a read-only database cannot undo in-flight transactions or write out a dirty buffer
                if (!noInFlightTransactions) {
                    throw StandardException.newException(SQLState.LOG_READ_ONLY_DB_NEEDS_UNDO);
                }
            }
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "About to call undo(), transaction table =" + tf.getTransactionTable());
            }
            if (!noInFlightTransactions) {
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "In recovery undo, rollback inflight transactions");
                }
                tf.rollbackAllTransactions(recoveryTransaction, rawStoreFactory);
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "finish recovery undo,");
                }
            } else {
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "No in flight transaction, no recovery undo work");
                }
            }
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "About to call rePrepare(), transaction table =" + tf.getTransactionTable());
            }
            tf.handlePreparedXacts(rawStoreFactory);
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "Finished rePrepare(), transaction table =" + tf.getTransactionTable());
            }
            // ///////////////////////////////////////////////////////////
            // 
            // End of recovery.
            // 
            // ///////////////////////////////////////////////////////////
            // recovery is finished.  Close the transaction
            recoveryTransaction.close();
            // notify the dataFactory that recovery is completed,
            // but before the checkpoint is written.
            dataFactory.postRecovery();
            // ////////////////////////////////////////////////////////////
            // set the transaction factory short id, we have seen all the
            // transactions in the log, and at the minimum, the checkpoint
            // transaction will be there.  Set the shortId to the next
            // value.
            // ////////////////////////////////////////////////////////////
            tf.resetTranId();
            // if we can't checkpoint for some reason, flush the log and carry on
            if (!ReadOnlyDB) {
                boolean needCheckpoint = true;
                // if the log has barely grown since the last checkpoint and no
                // rollbacks were done, then don't checkpoint. Otherwise checkpoint.
                if (currentCheckpoint != null && noInFlightTransactions && redoLWM != LogCounter.INVALID_LOG_INSTANT && undoLWM != LogCounter.INVALID_LOG_INSTANT) {
                    if ((logFileNumber == LogCounter.getLogFileNumber(redoLWM)) && (logFileNumber == LogCounter.getLogFileNumber(undoLWM)) && (endPosition < (LogCounter.getLogFilePosition(redoLWM) + 1000)))
                        needCheckpoint = false;
                }
                if (needCheckpoint && !checkpoint(rawStoreFactory, df, tf, false))
                    flush(logFileNumber, endPosition);
            }
            logger.close();
            recoveryNeeded = false;
        } catch (IOException ioe) {
            if (SanityManager.DEBUG)
                ioe.printStackTrace();
            throw markCorrupt(StandardException.newException(SQLState.LOG_IO_ERROR, ioe));
        } catch (ClassNotFoundException cnfe) {
            throw markCorrupt(StandardException.newException(SQLState.LOG_CORRUPTED, cnfe));
        } catch (StandardException se) {
            throw markCorrupt(se);
        } catch (Throwable th) {
            if (SanityManager.DEBUG) {
                SanityManager.showTrace(th);
                th.printStackTrace();
            }
            throw markCorrupt(StandardException.newException(SQLState.LOG_RECOVERY_FAILED, th));
        }
    } else {
        tf.useTransactionTable((Formatable) null);
        // set the transaction factory short id
        tf.resetTranId();
    }
    // done with recovery
    // ///////////////////////////////////////////////////////////
    // setup checkpoint daemon and cache cleaner
    // ///////////////////////////////////////////////////////////
    checkpointDaemon = rawStoreFactory.getDaemon();
    if (checkpointDaemon != null) {
        myClientNumber = checkpointDaemon.subscribe(this, true);
        // use the same daemon for the cache cleaner
        dataFactory.setupCacheCleaner(checkpointDaemon);
    }
}
Also used : IOException(java.io.IOException) StorageRandomAccessFile(org.apache.derby.io.StorageRandomAccessFile) StandardException(org.apache.derby.shared.common.error.StandardException) Formatable(org.apache.derby.iapi.services.io.Formatable) RawTransaction(org.apache.derby.iapi.store.raw.xact.RawTransaction) StorageFile(org.apache.derby.io.StorageFile)
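
One detail of the redo pass worth isolating is how a "fuzzy" log end is cleaned up: the space between the last complete log record (endPosition) and the physical end of file is overwritten with zeros, one full buffer at a time plus a final partial write. A small sketch of just that arithmetic, using java.io.RandomAccessFile rather than Derby's StorageRandomAccessFile; the class and method names are illustrative.

import java.io.IOException;
import java.io.RandomAccessFile;

public class ZeroFillTail {

    // Overwrite everything between endPosition and end-of-file with zeros,
    // in bufferSize chunks plus one remainder write, then sync.
    public static void zeroFill(RandomAccessFile log, long endPosition, int bufferSize)
            throws IOException {
        log.seek(endPosition);
        long eof = log.length();

        long nWrites = (eof - endPosition) / bufferSize;       // full buffers
        int rBytes = (int) ((eof - endPosition) % bufferSize); // remainder
        byte[] zeroBuf = new byte[bufferSize];

        while (nWrites-- > 0) {
            log.write(zeroBuf);
        }
        if (rBytes != 0) {
            log.write(zeroBuf, 0, rBytes);
        }
        log.getFD().sync(); // make the cleanup durable before recovery continues
    }
}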

Example 10 with StorageRandomAccessFile

use of org.apache.derby.io.StorageRandomAccessFile in project derby by apache.

the class LogToFile method switchLogFile.

/**
 *		Switch to the next log file if possible.
 *
 *		<P>MT - log factory is single threaded thru a log file switch, the log
 *		is frozen for the duration of the switch
 */
public void switchLogFile() throws StandardException {
    boolean switchedOver = false;
    // ///////////////////////////////////////////////////
    synchronized (this) {
        // wait until the log is not being flushed and is not frozen for backup.  Track (2985).
        while (logBeingFlushed | isFrozen) {
            try {
                wait();
            } catch (InterruptedException ie) {
                InterruptStatus.setInterrupted();
            }
        }
        // we have an empty log file here, refuse to switch.
        if (endPosition == LOG_FILE_HEADER_SIZE) {
            if (SanityManager.DEBUG) {
                Monitor.logMessage("not switching from an empty log file (" + logFileNumber + ")");
            }
            return;
        }
        // log file isn't being flushed right now and logOut is not being
        // used.
        StorageFile newLogFile = getLogFileName(logFileNumber + 1);
        if (logFileNumber + 1 >= maxLogFileNumber) {
            throw StandardException.newException(SQLState.LOG_EXCEED_MAX_LOG_FILE_NUMBER, maxLogFileNumber);
        }
        // the new log file
        StorageRandomAccessFile newLog = null;
        try {
            // if the new log file already exists and cannot be deleted, we cannot switch the log right now
            if (privExists(newLogFile) && !privDelete(newLogFile)) {
                logErrMsg(MessageService.getTextMessage(MessageId.LOG_NEW_LOGFILE_EXIST, newLogFile.getPath()));
                return;
            }
            try {
                newLog = privRandomAccessFile(newLogFile, "rw");
            } catch (IOException ioe) {
                newLog = null;
            }
            if (newLog == null || !privCanWrite(newLogFile)) {
                if (newLog != null)
                    newLog.close();
                newLog = null;
                return;
            }
            if (initLogFile(newLog, logFileNumber + 1, LogCounter.makeLogInstantAsLong(logFileNumber, endPosition))) {
                // New log file init ok, close the old one and
                // switch over, after this point, need to shutdown the
                // database if any error crops up
                switchedOver = true;
                // write out an extra 0 at the end to mark the end of the log
                // file.
                logOut.writeEndMarker(0);
                setEndPosition(endPosition + INT_LENGTH);
                // note that we are in a log switch so the flusher
                // does not request another log switch
                inLogSwitch = true;
                // flush everything including the int we just wrote
                flush(logFileNumber, endPosition);
                // simulate out of log error after the switch over
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(TEST_SWITCH_LOG_FAIL2))
                        throw new IOException("TestLogSwitchFail2");
                }
                // close the old log file
                logOut.close();
                logWrittenFromLastCheckPoint += endPosition;
                setEndPosition(newLog.getFilePointer());
                lastFlush = endPosition;
                if (isWriteSynced) {
                    // extend the file by writing zeros to it
                    preAllocateNewLogFile(newLog);
                    newLog.close();
                    newLog = openLogFileInWriteMode(newLogFile);
                    newLog.seek(endPosition);
                }
                logOut = new LogAccessFile(this, newLog, logBufferSize);
                newLog = null;
                if (SanityManager.DEBUG) {
                    if (endPosition != LOG_FILE_HEADER_SIZE)
                        SanityManager.THROWASSERT("new log file has unexpected size" + +endPosition);
                }
                logFileNumber++;
                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(endPosition == LOG_FILE_HEADER_SIZE, "empty log file has wrong size");
                }
            } else {
                // something went wrong, delete the half-baked file
                newLog.close();
                newLog = null;
                if (privExists(newLogFile))
                    privDelete(newLogFile);
                logErrMsg(MessageService.getTextMessage(MessageId.LOG_CANNOT_CREATE_NEW, newLogFile.getPath()));
                newLogFile = null;
            }
        } catch (IOException ioe) {
            inLogSwitch = false;
            // switching the log file is an optional operation and there is no direct
            // user control.  Just send a warning message to whatever system
            // administrator, if any, there may be.
            logErrMsg(MessageService.getTextMessage(MessageId.LOG_CANNOT_CREATE_NEW_DUETO, newLogFile.getPath(), ioe.toString()));
            try {
                if (newLog != null) {
                    newLog.close();
                    newLog = null;
                }
            } catch (IOException ioe2) {
            }
            if (newLogFile != null && privExists(newLogFile)) {
                privDelete(newLogFile);
                newLogFile = null;
            }
            // an error occurred after the old log file has been closed!
            if (switchedOver) {
                // limit any damage
                logOut = null;
                throw markCorrupt(StandardException.newException(SQLState.LOG_IO_ERROR, ioe));
            }
        }
        // Replication slave: allow the recovery thread to read the previous log file
        if (inReplicationSlaveMode) {
            allowedToReadFileNumber = logFileNumber - 1;
            synchronized (slaveRecoveryMonitor) {
                slaveRecoveryMonitor.notify();
            }
        }
        inLogSwitch = false;
    }
// unfreezes the log
}
Also used : StorageRandomAccessFile(org.apache.derby.io.StorageRandomAccessFile) StorageFile(org.apache.derby.io.StorageFile) IOException(java.io.IOException)
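
Both recover() and switchLogFile() lean on LogCounter to pack a log file number and a byte position into a single long "instant" (makeLogInstantAsLong, getLogFileNumber, getLogFilePosition). The toy class below illustrates the idea; the bit layout chosen here (file number in the upper 32 bits, position in the lower 32) is an assumption for the sketch and not necessarily LogCounter's actual encoding.

// Hypothetical packed log instant: file number in the high half,
// byte position in the low half of one long.
public final class PackedInstant {

    public static long make(long fileNumber, long position) {
        return (fileNumber << 32) | (position & 0xFFFFFFFFL);
    }

    public static long fileNumber(long instant) {
        return instant >>> 32;
    }

    public static long position(long instant) {
        return instant & 0xFFFFFFFFL;
    }

    public static void main(String[] args) {
        long instant = make(7, 1234);
        System.out.println(fileNumber(instant) + ":" + position(instant)); // prints 7:1234
    }
}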

Aggregations

StorageRandomAccessFile (org.apache.derby.io.StorageRandomAccessFile) 15
IOException (java.io.IOException) 10
StorageFile (org.apache.derby.io.StorageFile) 6
ByteArrayInputStream (java.io.ByteArrayInputStream) 2
DataInputStream (java.io.DataInputStream) 2
FileNotFoundException (java.io.FileNotFoundException) 2
DataStore (org.apache.derby.impl.io.vfmem.DataStore) 2
VirtualFile (org.apache.derby.impl.io.vfmem.VirtualFile) 2
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 1
DataOutputStream (java.io.DataOutputStream) 1
InputStream (java.io.InputStream) 1
CipherProvider (org.apache.derby.iapi.services.crypto.CipherProvider) 1
ArrayInputStream (org.apache.derby.iapi.services.io.ArrayInputStream) 1
Formatable (org.apache.derby.iapi.services.io.Formatable) 1
LogInstant (org.apache.derby.iapi.store.raw.log.LogInstant) 1
RawTransaction (org.apache.derby.iapi.store.raw.xact.RawTransaction) 1
StandardException (org.apache.derby.shared.common.error.StandardException) 1