Use of org.apache.derby.iapi.store.raw.log.LogInstant in project derby by apache — class CachedPage, method writePage.
/**
 * Write the page from this CachedPage object to disk.
 * <p>
 * Honors the write-ahead-log (WAL) protocol: the log is flushed up to this
 * page's last log instant before the page image is written to its container,
 * and the page is marked clean only after the write succeeds.
 *
 * @param identity identity (ie. page number) of the page to write
 * @param syncMe does the write of this single page have to be sync'd?
 *
 * @exception StandardException Standard exception policy.
 */
private void writePage(PageKey identity, boolean syncMe) throws StandardException {
    // make subclass write the page format
    writeFormatId(identity);

    // let subclass have a chance to write any cached data to page data
    // array
    writePage(identity);

    // force WAL - and check to see if database is corrupt or is frozen.
    // last log Instant may be null if the page is being forced
    // to disk on a createPage (which violates the WAL protocol actually).
    // See FileContainer.newPage
    LogInstant flushLogTo = getLastLogInstant();
    dataFactory.flush(flushLogTo);
    if (flushLogTo != null) {
        // the log covering this page is now on disk; forget the instant
        clearLastLogInstant();
    }

    // find the container and file access object
    FileContainer myContainer = (FileContainer) containerCache.find(identity.getContainerId());
    if (myContainer == null) {
        // container vanished out from under a dirty page: treat the failed
        // page write as corruption of the whole data store
        StandardException nested = StandardException.newException(SQLState.DATA_CONTAINER_VANISHED, identity.getContainerId());
        throw dataFactory.markCorrupt(StandardException.newException(SQLState.FILE_WRITE_PAGE_EXCEPTION, nested, identity));
    }
    try {
        myContainer.writePage(identity.getPageNumber(), pageData, syncMe);

        if (!isOverflowPage() && isDirty()) {
            // let the container know whether this page is a not-filled,
            // non-overflow page
            myContainer.trackUnfilledPage(identity.getPageNumber(), unfilled());
            // if this is not an overflow page, see if the page's row
            // count has changed since it came into the cache.
            //
            // if the page is invalid, row count is 0. Otherwise,
            // count non-deleted records on page.
            //
            // Cannot call nonDeletedRecordCount because the page is
            // unlatched now even though nobody is changing it
            int currentRowCount = internalNonDeletedRecordCount();
            if (currentRowCount != initialRowCount) {
                // push the delta into the container's estimated row count
                // and resync our snapshot of it
                myContainer.updateEstimatedRowCount(currentRowCount - initialRowCount);
                setContainerRowCount(myContainer.getEstimatedRowCount(0));
                initialRowCount = currentRowCount;
            }
        }
    } catch (IOException ioe) {
        // page cannot be written
        throw StandardException.newException(SQLState.FILE_WRITE_PAGE_EXCEPTION, ioe, identity);
    } finally {
        // always return the container to the cache, even on failure
        containerCache.release(myContainer);
        myContainer = null;
    }

    synchronized (this) {
        // change page state to not dirty after the successful write;
        // on failure the page stays dirty so it will be written again
        isDirty = false;
        preDirty = false;
    }
}
Use of org.apache.derby.iapi.store.raw.log.LogInstant in project derby by apache — class LogToFile, method initializeReplicationSlaveRole.
/**
* Initializes logOut so that log received from the replication
* master can be appended to the log file.
*
* Normally, logOut (the file log records are appended to) is set
* up as part of the recovery process. When the database is booted
* in replication slave mode, however, recovery will not get to
* the point where logOut is initialized until this database is no
* longer in slave mode. Since logOut is needed to append log
* records received from the master, logOut needs to be set up for
* replication slave mode.
*
* This method finds the last log record in the log file with the
* highest number. logOut is set up so that log records will be
* appended to the end of that file, and the endPosition and
* lastFlush variables are set to point to the end of the same
* file. All this is normally done as part of recovery.
*
* After the first log file switch resulting from applying log
* received from the master, recovery will be allowed to read up
* to, but not including, the current log file which is the file
* numbered logFileNumber.
*
* Note that this method must not be called until LogToFile#boot()
* has completed. Currently, this is ensured because RawStore#boot
* starts the SlaveFactory (in turn calling this method) after
* LogFactory.boot() has completed. Race conditions for
* logFileNumber may occur if this is changed.
*
* @exception StandardException Standard Derby error policy
*/
public void initializeReplicationSlaveRole() throws StandardException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(inReplicationSlaveMode, "This method should only be used when" + " in slave replication mode");
    }
    try {
        // Find the log file with the highest file number on disk
        while (getLogFileAtBeginning(logFileNumber + 1) != null) {
            logFileNumber++;
        }

        // Scan the highest log file to find its end.
        long startInstant = LogCounter.makeLogInstantAsLong(logFileNumber, LOG_FILE_HEADER_SIZE);
        long logEndInstant = LOG_FILE_HEADER_SIZE;
        StreamLogScan scanOfHighestLogFile = (StreamLogScan) openForwardsScan(startInstant, (LogInstant) null);
        ArrayInputStream scanInputStream = new ArrayInputStream();
        // walk every record; when the loop ends, logEndInstant is the end of
        // the last log record in the file (or the header if the file is empty)
        while (scanOfHighestLogFile.getNextRecord(scanInputStream, null, 0) != null) {
            logEndInstant = scanOfHighestLogFile.getLogRecordEnd();
        }
        setEndPosition(LogCounter.getLogFilePosition(logEndInstant));
        // endPosition and logFileNumber now point to the end of the
        // highest log file. This is where a new log record should be
        // appended.

        /*
         * Open the highest log file and make sure log records are
         * appended at the end of it
         */
        StorageRandomAccessFile logFile = null;
        if (isWriteSynced) {
            // write-synced mode uses its own open path ("rws"-style access)
            logFile = openLogFileInWriteMode(getLogFileName(logFileNumber));
        } else {
            logFile = privRandomAccessFile(getLogFileName(logFileNumber), "rw");
        }
        logOut = new LogAccessFile(this, logFile, logBufferSize);
        // nothing before endPosition needs flushing; it is already on disk
        lastFlush = endPosition;
        // append log records at the end of
        logFile.seek(endPosition);
        // the file
    } catch (IOException ioe) {
        throw StandardException.newException(SQLState.REPLICATION_UNEXPECTED_EXCEPTION, ioe);
    }
}
Use of org.apache.derby.iapi.store.raw.log.LogInstant in project derby by apache — class FileLogger, method logAndDo.
/*
** Methods of Logger
*/
/**
 * Writes out a log record to the log stream, and call its doMe method to
 * apply the change to the rawStore.
 * <BR>Any optional data the doMe method need is first written to the log
 * stream using operation.writeOptionalData, then whatever is written to
 * the log stream is passed back to the operation for the doMe method.
 *
 * <P>MT - there could be multiple threads running in the same raw
 * transactions and they can be calling the same logger to log different
 * log operations. This whole method is synchronized to make sure log
 * records are logged one at a time.
 *
 * @param xact the transaction logging the change
 * @param operation the log operation
 * @return the instant in the log that can be used to identify the log
 * record
 *
 * @exception StandardException Derby Standard error policy
 */
public synchronized LogInstant logAndDo(RawTransaction xact, Loggable operation) throws StandardException {
    boolean isLogPrepared = false;

    // tracks whether an IOException came from user code (serializing the
    // operation) or from the log buffer itself - see the catch at the bottom
    boolean inUserCode = false;
    byte[] preparedLog;
    try {
        logOutputBuffer.reset();

        // always use the short Id, only the BeginXact log record contains
        // the XactId (long form)
        TransactionId transactionId = xact.getId();

        // write out the log header with the operation embedded
        // this is by definition not a compensation log record,
        // those are called thru the logAndUndo interface
        logRecord.setValue(transactionId, operation);
        inUserCode = true;
        logicalOut.writeObject(logRecord);
        inUserCode = false;

        int optionalDataLength = 0;
        int optionalDataOffset = 0;
        int completeLength = 0;
        ByteArray preparedLogArray = operation.getPreparedLog();
        if (preparedLogArray != null) {
            preparedLog = preparedLogArray.getArray();
            optionalDataLength = preparedLogArray.getLength();
            optionalDataOffset = preparedLogArray.getOffset();
            // There is a race condition if the operation is a begin tran in
            // that between the time the beginXact log record is written to
            // disk and the time the transaction object is updated in the
            // beginXact.doMe method, other log records may be written.
            // This will render the transaction table in an inconsistent state
            // since it may think a later transaction is the earliest
            // transaction or it may think that there is no active transactions
            // where there is a bunch of them sitting on the log.
            //
            // Similarly, there is a race condition for endXact, i.e.,
            // 1) endXact is written to the log,
            // 2) checkpoint gets that (committed) transaction as the
            // firstUpdateTransaction
            // 3) the transaction calls postComplete, nulling out itself
            // 4) checkpoint tries to access a closed transaction object
            //
            // The solution is to sync between the time a begin tran or end
            // tran log record is sent to the log stream and its doMe method is
            // called to update the transaction table and in memory state
            //
            // We only need to serialize the begin and end Xact log records
            // because once a transaction has been started and in the
            // transaction table, its order and transaction state does not
            // change.
            //
            // Use the logFactory as the sync object so that a checkpoint can
            // take its snap shot of the undoLWM before or after a transaction
            // is started, but not in the middle. (see LogToFile.checkpoint)
            //
            // now set the input limit to be the optional data.
            // This limits amount of data available to logIn that doMe can
            // use
            logIn.setData(preparedLog);
            logIn.setPosition(optionalDataOffset);
            logIn.setLimit(optionalDataLength);
            if (SanityManager.DEBUG) {
                if ((optionalDataLength) != logIn.available())
                    SanityManager.THROWASSERT(" stream not set correctly " + optionalDataLength + " != " + logIn.available());
            }
        } else {
            preparedLog = null;
            optionalDataLength = 0;
        }
        logicalOut.writeInt(optionalDataLength);
        completeLength = logOutputBuffer.getPosition() + optionalDataLength;
        LogInstant logInstant = null;
        // in case of encryption, we need to pad
        int encryptedLength = 0;
        try {
            if (logFactory.databaseEncrypted()) {
                // we must pad the encryption data to be multiple of block
                // size, which is logFactory.getEncryptionBlockSize()
                encryptedLength = completeLength;
                if ((encryptedLength % logFactory.getEncryptionBlockSize()) != 0)
                    encryptedLength = encryptedLength + logFactory.getEncryptionBlockSize() - (encryptedLength % logFactory.getEncryptionBlockSize());
                // grow the reusable encryption buffer only when needed
                if (encryptionBuffer == null || encryptionBuffer.length < encryptedLength)
                    encryptionBuffer = new byte[encryptedLength];
                // copy header + operation, then the optional data, into one
                // contiguous buffer for encryption
                System.arraycopy(logOutputBuffer.getByteArray(), 0, encryptionBuffer, 0, completeLength - optionalDataLength);
                if (optionalDataLength > 0)
                    System.arraycopy(preparedLog, optionalDataOffset, encryptionBuffer, completeLength - optionalDataLength, optionalDataLength);
                // do not bother to clear out the padding area
                int len = logFactory.encrypt(encryptionBuffer, 0, encryptedLength, encryptionBuffer, 0);
                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(len == encryptedLength, "encrypted log buffer length != log buffer len");
            }
            // begin/end Xact records (FIRST or LAST in group) must append the
            // record and run doMe atomically w.r.t. checkpoint - see the long
            // race-condition comment above
            if ((operation.group() & (Loggable.FIRST | Loggable.LAST)) != 0) {
                synchronized (logFactory) {
                    long instant = 0;
                    if (logFactory.databaseEncrypted()) {
                        // encryption has completely drained both the
                        // logOutputBuffer array and the preparedLog array
                        instant = logFactory.appendLogRecord(encryptionBuffer, 0, encryptedLength, null, -1, 0);
                    } else {
                        instant = logFactory.appendLogRecord(logOutputBuffer.getByteArray(), 0, completeLength, preparedLog, optionalDataOffset, optionalDataLength);
                    }
                    logInstant = new LogCounter(instant);
                    operation.doMe(xact, logInstant, logIn);
                }
            } else {
                long instant = 0;
                if (logFactory.databaseEncrypted()) {
                    // encryption has completely drained both the
                    // logOutputBuffer array and the preparedLog array
                    instant = logFactory.appendLogRecord(encryptionBuffer, 0, encryptedLength, null, -1, 0);
                } else {
                    instant = logFactory.appendLogRecord(logOutputBuffer.getByteArray(), 0, completeLength, preparedLog, optionalDataOffset, optionalDataLength);
                }
                logInstant = new LogCounter(instant);
                operation.doMe(xact, logInstant, logIn);
            }
        } catch (StandardException se) {
            // a failed doMe leaves the store inconsistent with the log:
            // mark the whole database corrupt
            throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_DO_ME_FAIL, se, operation));
        } catch (IOException ioe) {
            throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_DO_ME_FAIL, ioe, operation));
        } finally {
            logIn.clearLimit();
        }
        if (SanityManager.DEBUG) {
            if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "Write log record: tranId=" + transactionId.toString() + " instant: " + logInstant.toString() + " length: " + completeLength + "\n" + operation + "\n");
            }
        }
        return logInstant;
    } catch (IOException ioe) {
        // error writing to the log buffer
        if (inUserCode) {
            throw StandardException.newException(SQLState.LOG_WRITE_LOG_RECORD, ioe, operation);
        } else {
            throw StandardException.newException(SQLState.LOG_BUFFER_FULL, ioe, operation);
        }
    }
}
Use of org.apache.derby.iapi.store.raw.log.LogInstant in project derby by apache — class LoggableActions, method doAction.
/**
 * Log the given operation against a page and apply it, verifying (in sanity
 * builds) that logging advanced the page's log instant and page version.
 *
 * @param t    the transaction performing the change
 * @param page the page being changed
 * @param lop  the loggable operation describing the change
 * @exception StandardException Standard Derby error policy
 */
private void doAction(RawTransaction t, BasePage page, Loggable lop) throws StandardException {
    // Snapshot the page's state before logging so the checks below can
    // confirm that the operation moved it forward.
    long versionBeforeLog = 0;
    LogInstant instantBeforeLog = null;
    if (SanityManager.DEBUG) {
        instantBeforeLog = page.getLastLogInstant();
        versionBeforeLog = page.getPageVersion();
    }

    // mark the page as pre-dirtied so that if a checkpoint happens after
    // the log record is sent to the log stream, the cache cleaning will
    // wait for this change.
    page.preDirty();

    t.logAndDo(lop);

    if (SanityManager.DEBUG) {
        // If either instant is null (e.g. a fresh page), it is expected to
        // stay null; otherwise the instant must strictly advance.
        LogInstant instantAfterLog = page.getLastLogInstant();
        boolean bothKnown = (instantBeforeLog != null) && (instantAfterLog != null);
        if (bothKnown && !instantBeforeLog.lessThan(instantAfterLog)) {
            SanityManager.THROWASSERT("old log instant = " + instantBeforeLog + " lastlog = " + instantAfterLog);
        }
        // the operation recorded the pre-log version, and logging bumped it
        SanityManager.ASSERT(versionBeforeLog == ((PageBasicOperation) lop).getPageVersion());
        SanityManager.ASSERT(page.getPageVersion() > versionBeforeLog);
    }
}
Use of org.apache.derby.iapi.store.raw.log.LogInstant in project derby by apache — class FlushedScan, method setFirstUnflushed.
/*
Private methods.
*/
/**
 * Refresh the cached location of the first unflushed log record.
 * Stores the packed instant plus its decomposed file number and file
 * position, then updates the scan's per-file unflushed position.
 *
 * @exception StandardException Standard Derby error policy
 * @exception IOException if the log cannot be accessed
 */
private void setFirstUnflushed() throws StandardException, IOException {
    // Ask the log factory for the boundary between flushed and unflushed log.
    LogInstant unflushedInstant = logFactory.getFirstUnflushedInstant();
    long packedInstant = ((LogCounter) unflushedInstant).getValueAsLong();

    // Cache the packed value and its file-number / file-position parts.
    firstUnflushed = packedInstant;
    firstUnflushedFileNumber = LogCounter.getLogFileNumber(packedInstant);
    firstUnflushedFilePosition = LogCounter.getLogFilePosition(packedInstant);

    setCurrentLogFileFirstUnflushedPosition();
}
Aggregations