Example 6 with ByteArray

use of org.apache.derby.iapi.util.ByteArray in project derby by apache.

the class CopyRowsOperation method writeOptionalDataToBuffer.

/**
 *		Write the rows that are to be copied into this page
 *
 *		@exception IOException Can be thrown by any of the methods of ObjectOutput.
 *		@exception StandardException Standard Derby policy.
 */
private void writeOptionalDataToBuffer(RawTransaction t, BasePage srcPage, int srcSlot) throws StandardException, IOException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(this.page != null);
        SanityManager.ASSERT(srcPage != null);
    }
    DynamicByteArrayOutputStream logBuffer = t.getLogBuffer();
    int optionalDataStart = logBuffer.getPosition();
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(optionalDataStart == 0, "Buffer for writing the optional data should start at position 0");
    }
    // check to make sure the destination page has enough space to
    // take the rows
    int[] spaceNeeded = new int[num_rows];
    int startPosition = logBuffer.getPosition();
    for (int i = 0; i < num_rows; i++) {
        // the recordId passed in is the record Id this row will have at
        // the destination page, not the record Id this row has on the
        // srcPage.
        srcPage.logRecord(i + srcSlot, BasePage.LOG_RECORD_DEFAULT, recordIds[i], (FormatableBitSet) null, logBuffer, (RecordHandle) null);
        spaceNeeded[i] = logBuffer.getPosition() - startPosition;
        startPosition = logBuffer.getPosition();
        // now spaceNeeded[i] holds the actual record size.  However, the src
        // page may leave extra room for the record in the form of
        // reserved space.  Because we want to copy the reserved space as well,
        // we need to take that amount into account.
        spaceNeeded[i] += reservedSpace[i];
    }
    // page is the destination page.
    if (!page.spaceForCopy(num_rows, spaceNeeded)) {
        throw StandardException.newException(SQLState.DATA_NO_SPACE_FOR_RECORD);
    }
    int optionalDataLength = logBuffer.getPosition() - optionalDataStart;
    if (SanityManager.DEBUG) {
        if (optionalDataLength != logBuffer.getUsed())
            SanityManager.THROWASSERT("wrong optional data length, optionalDataLength = " + optionalDataLength + ", logBuffer.getUsed() = " + logBuffer.getUsed());
    }
    // set the position to the beginning of the buffer
    logBuffer.setPosition(optionalDataStart);
    this.preparedLog = new ByteArray(logBuffer.getByteArray(), optionalDataStart, optionalDataLength);
}
Also used : DynamicByteArrayOutputStream(org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream) ByteArray(org.apache.derby.iapi.util.ByteArray)
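
The closing idiom of this method recurs throughout these examples: write the optional data into the transaction's log buffer, take the written length as the difference between the current position and the start position, rewind the buffer, and wrap its backing array in a ByteArray without copying. Below is a minimal standalone sketch of just that idiom, using only the two classes listed above; the payload bytes are made up for illustration, standing in for what srcPage.logRecord(...) would produce.

import java.io.IOException;
import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;
import org.apache.derby.iapi.util.ByteArray;

public class OptionalDataSketch {
    public static void main(String[] args) throws IOException {
        // Stand-in for t.getLogBuffer(): a growable buffer whose position starts at 0.
        DynamicByteArrayOutputStream logBuffer = new DynamicByteArrayOutputStream(1024);
        int optionalDataStart = logBuffer.getPosition();

        // Illustrative payload; in CopyRowsOperation this is written by srcPage.logRecord(...).
        byte[] fakeRecord = { 1, 2, 3, 4 };
        logBuffer.write(fakeRecord);

        // Length of everything written since optionalDataStart.
        int optionalDataLength = logBuffer.getPosition() - optionalDataStart;

        // Rewind and wrap; ByteArray just records (array, offset, length) without copying.
        logBuffer.setPosition(optionalDataStart);
        ByteArray preparedLog =
                new ByteArray(logBuffer.getByteArray(), optionalDataStart, optionalDataLength);

        System.out.println("offset=" + preparedLog.getOffset()
                + " length=" + preparedLog.getLength());
    }
}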

Example 7 with ByteArray

use of org.apache.derby.iapi.util.ByteArray in project derby by apache.

the class PurgeOperation method writeOptionalDataToBuffer.

/**
 *		Write out the purged record from the page.  Used for undo only.
 *
 *		@exception IOException Can be thrown by any of the methods of ObjectOutput.
 *		@exception StandardException Standard Derby policy.
 */
private void writeOptionalDataToBuffer(RawTransaction t, boolean needDataLogged) throws StandardException, IOException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(this.page != null);
    }
    DynamicByteArrayOutputStream logBuffer = t.getLogBuffer();
    int optionalDataStart = logBuffer.getPosition();
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(optionalDataStart == 0, "Buffer for writing the optional data should start at position 0");
    }
    for (int i = 0; i < num_rows; i++) {
        if (needDataLogged) {
            this.page.logRecord(i + slot, BasePage.LOG_RECORD_DEFAULT, recordIds[i], (FormatableBitSet) null, logBuffer, (RecordHandle) null);
        } else {
            this.page.logRecord(i + slot, BasePage.LOG_RECORD_FOR_PURGE, recordIds[i], (FormatableBitSet) null, logBuffer, (RecordHandle) null);
        }
    }
    int optionalDataLength = logBuffer.getPosition() - optionalDataStart;
    if (SanityManager.DEBUG) {
        if (optionalDataLength != logBuffer.getUsed())
            SanityManager.THROWASSERT("wrong optional data length, optionalDataLength = " + optionalDataLength + ", logBuffer.getUsed() = " + logBuffer.getUsed());
    }
    // set the position to the beginning of the buffer
    logBuffer.setPosition(optionalDataStart);
    this.preparedLog = new ByteArray(logBuffer.getByteArray(), optionalDataStart, optionalDataLength);
}
Also used : DynamicByteArrayOutputStream(org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream) ByteArray(org.apache.derby.iapi.util.ByteArray)
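
The tail of this method (measure the length, rewind, wrap) is identical to the tail of Example 6, and the same steps appear again in Examples 8 and 9. A hypothetical helper like the one below could factor out that shared idiom; the class and method names here are invented for illustration and do not exist in Derby.

import java.io.IOException;
import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;
import org.apache.derby.iapi.util.ByteArray;

final class OptionalDataUtil {
    private OptionalDataUtil() {
    }

    // Hypothetical helper: wrap everything written to logBuffer since
    // optionalDataStart in a ByteArray, after rewinding the buffer to that
    // position, mirroring the last lines of writeOptionalDataToBuffer above.
    static ByteArray wrapOptionalData(DynamicByteArrayOutputStream logBuffer,
            int optionalDataStart) throws IOException {
        int optionalDataLength = logBuffer.getPosition() - optionalDataStart;
        logBuffer.setPosition(optionalDataStart);
        return new ByteArray(logBuffer.getByteArray(), optionalDataStart, optionalDataLength);
    }
}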

Example 8 with ByteArray

use of org.apache.derby.iapi.util.ByteArray in project derby by apache.

the class T_Undoable method writeOptionalDataToBuffer.

private void writeOptionalDataToBuffer() throws StandardException, IOException {
    if (logBuffer == null) {
        // YYZ: need to revisit this.  Do we really want to allocate this much for a buffer every time?
        // init size 1K
        logBuffer = new DynamicByteArrayOutputStream(1024);
    } else {
        logBuffer.reset();
    }
    int optionalDataStart = logBuffer.getPosition();
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(optionalDataStart == 0, "Buffer for writing the optional data should start at position 0");
    }
    // MsgTrace.traceString("{{{tu.writeOpetionalData");
    if (optionalDataLen > 0) {
        byte[] buf = new byte[optionalDataLen];
        for (int ix = 0; ix < optionalDataLen; ix++) buf[ix] = (byte) ix;
        logBuffer.write(buf);
    }
    // MsgTrace.traceString("}}}tu.writeOpetionalData");
    int optionalDataLength = logBuffer.getPosition() - optionalDataStart;
    if (SanityManager.DEBUG) {
        if (optionalDataLength != logBuffer.getUsed())
            SanityManager.THROWASSERT("wrong optional data length, optionalDataLength = " + optionalDataLength + ", logBuffer.getUsed() = " + logBuffer.getUsed());
    }
    // set the position to the beginning of the buffer
    logBuffer.setPosition(optionalDataStart);
    this.preparedLog = new ByteArray(logBuffer.getByteArray(), optionalDataStart, optionalDataLength);
}
Also used : DynamicByteArrayOutputStream(org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream) ByteArray(org.apache.derby.iapi.util.ByteArray)
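
T_Undoable is test code, and the payload it writes is simply buf[ix] = (byte) ix. The sketch below shows how a consumer could verify that payload through the ByteArray accessors (getArray, getOffset, getLength, the same methods FileLogger.logAndDo uses in Example 10) without copying it; it is an illustrative check, not part of the Derby test.

import org.apache.derby.iapi.util.ByteArray;

public class PreparedLogCheck {
    // Returns true if the wrapped slice matches the synthetic pattern written
    // by T_Undoable.writeOptionalDataToBuffer (byte value == index).
    static boolean matchesSyntheticPayload(ByteArray preparedLog) {
        byte[] buf = preparedLog.getArray();
        int offset = preparedLog.getOffset();
        int length = preparedLog.getLength();
        for (int ix = 0; ix < length; ix++) {
            if (buf[offset + ix] != (byte) ix) {
                return false;
            }
        }
        return true;
    }
}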

Example 9 with ByteArray

use of org.apache.derby.iapi.util.ByteArray in project derby by apache.

the class UpdateOperation method writeOptionalDataToBuffer.

/**
 *		Write out the changed columns of the new record (from the row) followed
 *		by the changed columns of the old record (from the page).
 *
 *		@exception StandardException Thrown by methods I call
 *		@exception IOException Thrown by methods I call
 */
private void writeOptionalDataToBuffer(RawTransaction t, DynamicByteArrayOutputStream logBuffer, Object[] row, FormatableBitSet validColumns, int realStartColumn, int realSpaceOnPage, RecordHandle headRowHandle) throws StandardException, IOException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(this.page != null);
    }
    if (realStartColumn == (-1)) {
        logBuffer = t.getLogBuffer();
    }
    int optionalDataStart = logBuffer.getPosition();
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT((realStartColumn != -1 || optionalDataStart == 0), "Buffer for writing optional data should start at position 0");
    }
    this.nextColumn = this.page.logRow(doMeSlot, false, recordId, row, validColumns, logBuffer, 0, Page.INSERT_OVERFLOW, realStartColumn, realSpaceOnPage, 100);
    FormatableBitSet loggedColumns = validColumns;
    if ((nextColumn != -1) && (validColumns != null)) {
        // if nextColumn is not -1, then this must be an update which moves
        // columns off of the current page.  If validColumns == null then
        // we are logging all of the before image columns anyway.
        // get total number of fields of the old record.
        int numberFields = page.getHeaderAtSlot(doMeSlot).getNumberFields();
        // create new bit map, copying all bits that were set in original
        loggedColumns = new FormatableBitSet(validColumns);
        // make sure there is room in the bit map to add the columns being
        // deleted from the end of the row.
        // The important thing is that endField must be at least as big as
        // the number of columns in the entire record (including previous
        // pages of a long row) up to the end of this page.
        int endField = nextColumn + numberFields;
        loggedColumns.grow(endField);
        // logRecord will just ignore the extra bits.
        for (int i = nextColumn; i < endField; i++) {
            loggedColumns.set(i);
        }
    }
    // log the old version of the changed data
    this.page.logRecord(doMeSlot, BasePage.LOG_RECORD_FOR_UPDATE, recordId, loggedColumns, logBuffer, headRowHandle);
    // get length of all the optional data.
    optionalDataStart = logBuffer.getBeginPosition();
    int optionalDataLength = logBuffer.getPosition() - optionalDataStart;
    // set the position to the beginning of the buffer
    logBuffer.setPosition(optionalDataStart);
    this.preparedLog = new ByteArray(logBuffer.getByteArray(), optionalDataStart, optionalDataLength);
}
Also used : ByteArray(org.apache.derby.iapi.util.ByteArray) FormatableBitSet(org.apache.derby.iapi.services.io.FormatableBitSet)
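
The bit-set handling is the subtle part of this example: the caller's validColumns is copied, grown, and the trailing bits are set so that the before image of the columns moving off the page gets logged as well. Below is a minimal sketch of that manipulation in isolation; the column numbers and field counts are made up for illustration.

import org.apache.derby.iapi.services.io.FormatableBitSet;

public class LoggedColumnsSketch {
    public static void main(String[] args) {
        // Suppose the update touches columns 1 and 3 of the row ...
        FormatableBitSet validColumns = new FormatableBitSet(4);
        validColumns.set(1);
        validColumns.set(3);

        // ... and logRow reported that columns from nextColumn onward move off
        // this page, with the old record holding numberFields fields here.
        int nextColumn = 5;
        int numberFields = 3;

        // Copy the caller's bit set, widen it, and mark the extra columns,
        // mirroring the loggedColumns handling in UpdateOperation above.
        FormatableBitSet loggedColumns = new FormatableBitSet(validColumns);
        int endField = nextColumn + numberFields;
        loggedColumns.grow(endField);
        for (int i = nextColumn; i < endField; i++) {
            loggedColumns.set(i);
        }

        System.out.println("logged columns bit set: " + loggedColumns);
    }
}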

Example 10 with ByteArray

use of org.apache.derby.iapi.util.ByteArray in project derby by apache.

the class FileLogger method logAndDo.

/*
	** Methods of Logger
	*/
/**
 *		Writes out a log record to the log stream, and calls its doMe method to
 *		apply the change to the rawStore.
 *		<BR>Any optional data the doMe method needs is first written to the log
 *		stream using operation.writeOptionalData; whatever is written to
 *		the log stream is then passed back to the operation for the doMe method.
 *
 *		<P>MT - there could be multiple threads running in the same raw
 *		transaction, and they can call the same logger to log different
 *		log operations.  This whole method is synchronized to make sure log
 *		records are logged one at a time.
 *
 *		@param xact the transaction logging the change
 *		@param operation the log operation
 *		@return the instant in the log that can be used to identify the log
 *		record
 *
 *		@exception StandardException Derby Standard error policy
 */
public synchronized LogInstant logAndDo(RawTransaction xact, Loggable operation) throws StandardException {
    boolean isLogPrepared = false;
    boolean inUserCode = false;
    byte[] preparedLog;
    try {
        logOutputBuffer.reset();
        // always use the short Id, only the BeginXact log record contains
        // the XactId (long form)
        TransactionId transactionId = xact.getId();
        // write out the log header with the operation embedded
        // this is by definition not a compensation log record,
        // those are called thru the logAndUndo interface
        logRecord.setValue(transactionId, operation);
        inUserCode = true;
        logicalOut.writeObject(logRecord);
        inUserCode = false;
        int optionalDataLength = 0;
        int optionalDataOffset = 0;
        int completeLength = 0;
        ByteArray preparedLogArray = operation.getPreparedLog();
        if (preparedLogArray != null) {
            preparedLog = preparedLogArray.getArray();
            optionalDataLength = preparedLogArray.getLength();
            optionalDataOffset = preparedLogArray.getOffset();
            // There is a race condition if the operation is a begin tran in
            // that between the time the beginXact log record is written to
            // disk and the time the transaction object is updated in the
            // beginXact.doMe method, other log records may be written.
            // This will leave the transaction table in an inconsistent state,
            // since it may think a later transaction is the earliest
            // transaction, or it may think that there are no active
            // transactions when a bunch of them are sitting in the log.
            // 
            // Similarly, there is a race condition for endXact, i.e.,
            // 1) endXact is written to the log,
            // 2) checkpoint gets that (committed) transaction as the
            // firstUpdateTransaction
            // 3) the transaction calls postComplete, nulling out itself
            // 4) checkpoint tries to access a closed transaction object
            // 
            // The solution is to sync between the time a begin tran or end
            // tran log record is sent to the log stream and its doMe method is
            // called to update the transaction table and in memory state
            // 
            // We only need to serialize the begin and end Xact log records,
            // because once a transaction has been started and is in the
            // transaction table, its order and transaction state do not
            // change.
            // 
            // Use the logFactory as the sync object so that a checkpoint can
            // take its snapshot of the undoLWM before or after a transaction
            // is started, but not in the middle. (see LogToFile.checkpoint)
            // 
            // Now set the input limit to be the optional data.
            // This limits the amount of data available to logIn that doMe
            // can use.
            logIn.setData(preparedLog);
            logIn.setPosition(optionalDataOffset);
            logIn.setLimit(optionalDataLength);
            if (SanityManager.DEBUG) {
                if ((optionalDataLength) != logIn.available())
                    SanityManager.THROWASSERT(" stream not set correctly " + optionalDataLength + " != " + logIn.available());
            }
        } else {
            preparedLog = null;
            optionalDataLength = 0;
        }
        logicalOut.writeInt(optionalDataLength);
        completeLength = logOutputBuffer.getPosition() + optionalDataLength;
        LogInstant logInstant = null;
        // in case of encryption, we need to pad
        int encryptedLength = 0;
        try {
            if (logFactory.databaseEncrypted()) {
                // we must pad the data to be encrypted to a multiple of the
                // block size, which is logFactory.getEncryptionBlockSize()
                encryptedLength = completeLength;
                if ((encryptedLength % logFactory.getEncryptionBlockSize()) != 0)
                    encryptedLength = encryptedLength + logFactory.getEncryptionBlockSize() - (encryptedLength % logFactory.getEncryptionBlockSize());
                if (encryptionBuffer == null || encryptionBuffer.length < encryptedLength)
                    encryptionBuffer = new byte[encryptedLength];
                System.arraycopy(logOutputBuffer.getByteArray(), 0, encryptionBuffer, 0, completeLength - optionalDataLength);
                if (optionalDataLength > 0)
                    System.arraycopy(preparedLog, optionalDataOffset, encryptionBuffer, completeLength - optionalDataLength, optionalDataLength);
                // do not bother to clear out the padding area
                int len = logFactory.encrypt(encryptionBuffer, 0, encryptedLength, encryptionBuffer, 0);
                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(len == encryptedLength, "encrypted log buffer length != log buffer len");
            }
            if ((operation.group() & (Loggable.FIRST | Loggable.LAST)) != 0) {
                synchronized (logFactory) {
                    long instant = 0;
                    if (logFactory.databaseEncrypted()) {
                        // encryption has completely drained both the
                        // logOutputBuffer array and the preparedLog array
                        instant = logFactory.appendLogRecord(encryptionBuffer, 0, encryptedLength, null, -1, 0);
                    } else {
                        instant = logFactory.appendLogRecord(logOutputBuffer.getByteArray(), 0, completeLength, preparedLog, optionalDataOffset, optionalDataLength);
                    }
                    logInstant = new LogCounter(instant);
                    operation.doMe(xact, logInstant, logIn);
                }
            } else {
                long instant = 0;
                if (logFactory.databaseEncrypted()) {
                    // encryption has completely drained both the
                    // logOutputBuffer array and the preparedLog array
                    instant = logFactory.appendLogRecord(encryptionBuffer, 0, encryptedLength, null, -1, 0);
                } else {
                    instant = logFactory.appendLogRecord(logOutputBuffer.getByteArray(), 0, completeLength, preparedLog, optionalDataOffset, optionalDataLength);
                }
                logInstant = new LogCounter(instant);
                operation.doMe(xact, logInstant, logIn);
            }
        } catch (StandardException se) {
            throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_DO_ME_FAIL, se, operation));
        } catch (IOException ioe) {
            throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_DO_ME_FAIL, ioe, operation));
        } finally {
            logIn.clearLimit();
        }
        if (SanityManager.DEBUG) {
            if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "Write log record: tranId=" + transactionId.toString() + " instant: " + logInstant.toString() + " length: " + completeLength + "\n" + operation + "\n");
            }
        }
        return logInstant;
    } catch (IOException ioe) {
        // error writing to the log buffer
        if (inUserCode) {
            throw StandardException.newException(SQLState.LOG_WRITE_LOG_RECORD, ioe, operation);
        } else {
            throw StandardException.newException(SQLState.LOG_BUFFER_FULL, ioe, operation);
        }
    }
}
Also used : StandardException(org.apache.derby.shared.common.error.StandardException) LogInstant(org.apache.derby.iapi.store.raw.log.LogInstant) ByteArray(org.apache.derby.iapi.util.ByteArray) LogCounter(org.apache.derby.impl.store.raw.log.LogCounter) IOException(java.io.IOException) TransactionId(org.apache.derby.iapi.store.raw.xact.TransactionId)
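
One self-contained detail worth pulling out is the encryption padding: completeLength is rounded up to the next multiple of the encryption block size before the buffer is encrypted and appended. The sketch below repeats just that arithmetic; the block size of 8 is illustrative, the real value comes from logFactory.getEncryptionBlockSize().

public class LogPaddingSketch {
    // Round completeLength up to the next multiple of blockSize, using the
    // same arithmetic as FileLogger.logAndDo above.
    static int paddedLength(int completeLength, int blockSize) {
        int encryptedLength = completeLength;
        if ((encryptedLength % blockSize) != 0) {
            encryptedLength = encryptedLength + blockSize - (encryptedLength % blockSize);
        }
        return encryptedLength;
    }

    public static void main(String[] args) {
        int blockSize = 8; // illustrative; Derby asks logFactory.getEncryptionBlockSize()
        System.out.println(paddedLength(29, blockSize)); // prints 32
        System.out.println(paddedLength(32, blockSize)); // prints 32 (already aligned)
    }
}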

Aggregations

ByteArray (org.apache.derby.iapi.util.ByteArray) - 13 usages
DynamicByteArrayOutputStream (org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream) - 6 usages
IOException (java.io.IOException) - 2 usages
FormatableBitSet (org.apache.derby.iapi.services.io.FormatableBitSet) - 2 usages
RecordHandle (org.apache.derby.iapi.store.raw.RecordHandle) - 2 usages
StandardException (org.apache.derby.shared.common.error.StandardException) - 2 usages
ByteArrayInputStream (java.io.ByteArrayInputStream) - 1 usage
EOFException (java.io.EOFException) - 1 usage
InputStream (java.io.InputStream) - 1 usage
Connection (java.sql.Connection) - 1 usage
ArrayInputStream (org.apache.derby.iapi.services.io.ArrayInputStream) - 1 usage
FormatIdInputStream (org.apache.derby.iapi.services.io.FormatIdInputStream) - 1 usage
StreamStorable (org.apache.derby.iapi.services.io.StreamStorable) - 1 usage
ClassFactory (org.apache.derby.iapi.services.loader.ClassFactory) - 1 usage
LanguageConnectionContext (org.apache.derby.iapi.sql.conn.LanguageConnectionContext) - 1 usage
LogInstant (org.apache.derby.iapi.store.raw.log.LogInstant) - 1 usage
TransactionId (org.apache.derby.iapi.store.raw.xact.TransactionId) - 1 usage
DataValueDescriptor (org.apache.derby.iapi.types.DataValueDescriptor) - 1 usage
EmbedConnection (org.apache.derby.impl.jdbc.EmbedConnection) - 1 usage
LogCounter (org.apache.derby.impl.store.raw.log.LogCounter) - 1 usage