use of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream in project derby by apache.
the class ExportAbstract method stringifyObject.
// write a Serializable as a string
public static String stringifyObject(Object udt) throws Exception {
    DynamicByteArrayOutputStream dbaos = new DynamicByteArrayOutputStream();
    ObjectOutputStream oos = new ObjectOutputStream(dbaos);
    oos.writeObject(udt);
    byte[] buffer = dbaos.getByteArray();
    int length = dbaos.getUsed();
    return StringUtil.toHexString(buffer, 0, length);
}
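For reference, a hypothetical inverse of this helper could look like the sketch below. It decodes the hex string back into the serialized object; the only assumption is StringUtil.fromHexString(String, int, int), the counterpart of the toHexString() call used above.

import java.io.ByteArrayInputStream;
import java.io.ObjectInputStream;
import org.apache.derby.iapi.util.StringUtil;

// hypothetical inverse of stringifyObject(): hex string -> object
public static Object destringifyObject(String hex) throws Exception {
    byte[] bytes = StringUtil.fromHexString(hex, 0, hex.length());
    ObjectInputStream ois =
            new ObjectInputStream(new ByteArrayInputStream(bytes));
    return ois.readObject();
}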
use of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream in project derby by apache.
the class BasePage method insertAllowOverflow.
/**
 * Insert a row allowing overflow.
 *
 * If handle is supplied then the record at that handle will be updated
 * to indicate it is a partial row and it has an overflow portion.
 *
 * @exception StandardException Standard Derby error policy
 */
public RecordHandle insertAllowOverflow(int slot, Object[] row,
        FormatableBitSet validColumns, int startColumn, byte insertFlag,
        int overflowThreshold, RecordHandle nextPortionHandle)
        throws StandardException {
    BasePage curPage = this;
    if (!curPage.owner.updateOK()) {
        throw StandardException.newException(SQLState.DATA_CONTAINER_READ_ONLY);
    }
    // Handle of the first portion of the chain
    RecordHandle headHandle = null;
    RecordHandle handleToUpdate = null;
    RawTransaction t = curPage.owner.getTransaction();
    for (;;) {
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(curPage.isLatched());
        }
        if (!curPage.allowInsert())
            return null;
        // 'this' is the head page
        if (curPage != this)
            slot = curPage.recordCount;
        boolean isLongColumns = false;
        int realStartColumn = -1;
        int realSpaceOnPage = -1;
        DynamicByteArrayOutputStream logBuffer = null;
        // allocate new record id and handle
        int recordId = curPage.newRecordIdAndBump();
        RecordHandle handle = new RecordId(curPage.getPageId(), recordId, slot);
        if (curPage == this) {
            // Lock the row, if it is the very first portion of the record.
            if (handleToUpdate == null) {
                while (!owner.getLockingPolicy().lockRecordForWrite(
                        t, handle, true /* lock is for insert */, false)) {
                    // Loop until we get a new record id we can get a lock
                    // on. If we can't get the lock without waiting then
                    // assume the record id is owned by another xact. The
                    // current heap overflow algorithm makes this likely,
                    // as it first tries to insert a row, telling raw store
                    // to fail if it doesn't fit on the page, getting a lock
                    // on an id that never makes it to disk. The
                    // inserting transaction will hold a lock on this
                    // "unused" record id until it commits. The page can
                    // leave the cache at this point, and the inserting
                    // transaction has not dirtied the page (it failed
                    // after getting the lock but before logging anything);
                    // another inserting transaction will then get the
                    // same id as the previous inserter - thus the loop on
                    // lock waits.
                    //
                    // The lock we request indicates that this is a lock
                    // for insert, which the locking policy may use to
                    // perform locking concurrency optimizations.

                    // allocate a new record id and handle
                    recordId = curPage.newRecordIdAndBump();
                    handle = new RecordId(curPage.getPageId(), recordId, slot);
                }
            }
            headHandle = handle;
        }
        do {
            // Do this loop at least once. If we caught a long column,
            // we redo the insert with the saved logBuffer.
            try {
                startColumn = owner.getActionSet().actionInsert(
                        t, curPage, slot, recordId, row, validColumns,
                        (LogicalUndo) null, insertFlag, startColumn, false,
                        realStartColumn, logBuffer, realSpaceOnPage,
                        overflowThreshold);
                isLongColumns = false;
            } catch (LongColumnException lce) {
                // We caught a long column exception.
                // Three things should happen here:
                // 1. insert the long column into overflow pages.
                // 2. append the overflow field header in the main chain.
                // 3. continue the insert in the main data chain.
                logBuffer = new DynamicByteArrayOutputStream(lce.getLogBuffer());
                // step 1: insert the long column ... use the same
                // insertFlag as the rest of the row.
                RecordHandle longColumnHandle =
                        insertLongColumn(curPage, lce, insertFlag);
                // step 2: append the overflow field header to the log buffer
                int overflowFieldLen = 0;
                try {
                    overflowFieldLen += appendOverflowFieldHeader(
                            (DynamicByteArrayOutputStream) logBuffer,
                            longColumnHandle);
                } catch (IOException ioe) {
                    // YYZ: revisit... ioexception, insert failed...
                    return null;
                }
                // step 3: continue the insert in the main data chain;
                // need to pass the log buffer and start column to the
                // next insert.
                realStartColumn = lce.getNextColumn() + 1;
                realSpaceOnPage = lce.getRealSpaceOnPage() - overflowFieldLen;
                isLongColumns = true;
            }
        } while (isLongColumns);
        if (handleToUpdate != null) {
            // update the record header on the previous page
            updateOverflowDetails(handleToUpdate, handle);
        }
        // all done
        if (startColumn == -1) {
            if (curPage != this)
                curPage.unlatch();
            if (nextPortionHandle != null) {
                // need to update the overflow details of the last portion
                // to point to the existing portion
                updateOverflowDetails(handle, nextPortionHandle);
            }
            return headHandle;
        }
        handleToUpdate = handle;
        BasePage nextPage = curPage.getOverflowPageForInsert(
                slot, row, validColumns, startColumn);
        if (curPage != this)
            curPage.unlatch();
        curPage = nextPage;
    }
}
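The interplay between actionInsert and LongColumnException above is easier to see stripped of the paging machinery. The following self-contained sketch keeps only the catch-and-resume control flow: LongColumnSketchException, tryInsert, and spillLongColumn are hypothetical stand-ins, and the only Derby API used is DynamicByteArrayOutputStream, including the copy constructor that the catch block above relies on.

import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;

public class LongColumnRetrySketch {

    // hypothetical stand-in for Derby's LongColumnException
    static class LongColumnSketchException extends Exception {
        final DynamicByteArrayOutputStream logBuffer; // log built so far
        final int nextColumn;                         // column that overflowed
        LongColumnSketchException(DynamicByteArrayOutputStream buf, int next) {
            this.logBuffer = buf;
            this.nextColumn = next;
        }
    }

    static void insertRow(byte[][] row) throws Exception {
        DynamicByteArrayOutputStream logBuffer = null;
        int realStartColumn = -1;
        boolean isLongColumns;
        do {
            isLongColumns = false;
            try {
                tryInsert(row, realStartColumn, logBuffer);
            } catch (LongColumnSketchException lce) {
                // keep the log built so far, spill the oversized column,
                // then resume the insert at the following column
                logBuffer = new DynamicByteArrayOutputStream(lce.logBuffer);
                spillLongColumn(row[lce.nextColumn]);
                realStartColumn = lce.nextColumn + 1;
                isLongColumns = true;
            }
        } while (isLongColumns);
    }

    // pretend any column wider than 16 bytes is "long"
    static void tryInsert(byte[][] row, int startColumn,
            DynamicByteArrayOutputStream logBuffer)
            throws LongColumnSketchException {
        DynamicByteArrayOutputStream log = (logBuffer != null)
                ? logBuffer : new DynamicByteArrayOutputStream();
        for (int i = Math.max(startColumn, 0); i < row.length; i++) {
            if (row[i].length > 16) {
                throw new LongColumnSketchException(log, i);
            }
            log.write(row[i], 0, row[i].length);
        }
    }

    static void spillLongColumn(byte[] column) {
        // in BasePage this is insertLongColumn(); omitted here
    }
}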
use of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream in project derby by apache.
the class BasePage method insertLongColumn.
/**
 * Routine to insert a long column.
 * <p>
 * This code inserts a long column as a linked list of rows on overflow
 * pages. This list is pointed to by a small pointer in the main page
 * row column. The operation does the following:
 * <pre>
 *     allocate new overflow page
 *     insert single row filling overflow page
 *     while (more of column exists)
 *         allocate new overflow page
 *         insert single row with next piece of row
 *         update previous piece to point to this new piece of row
 * </pre>
 *
 * The same code is called both from an initial insert of a long column
 * and from a subsequent update that results in a long column.
 *
 * @return The recordHandle of the first piece of the long column chain.
 *
 * @param mainChainPage The parent page with the row piece containing the
 *                      column that will eventually point to this long
 *                      column chain.
 * @param lce           The LongColumnException thrown when we recognized
 *                      that the column being inserted was "long"; this
 *                      structure is used to cache the info that we have
 *                      read so far about the column. In the case of an
 *                      insert of a stream it will have a copy of just the
 *                      first page of the stream that has already been
 *                      read once.
 * @param insertFlag    flags for the insert operation.
 *
 * @exception StandardException Standard exception policy.
 */
protected RecordHandle insertLongColumn(BasePage mainChainPage,
        LongColumnException lce, byte insertFlag) throws StandardException {
    Object[] row = new Object[1];
    row[0] = lce.getColumn();
    RecordHandle firstHandle = null;
    RecordHandle handle = null;
    RecordHandle prevHandle = null;
    BasePage curPage = mainChainPage;
    BasePage prevPage = null;
    boolean isFirstPage = true;
    // Undo inserts as purges of all pieces of the overflow column
    // except for the 1st overflow page pointed at by the main row.
    //
    // Consider a row with one column which is a long column
    // that takes 2 pages for itself plus an entry in the main parent page.
    // The log records in order for this look something like:
    //     insert overflow page 1
    //     insert overflow page 2
    //     update overflow page 1 record to have pointer to overflow page 2
    //     insert main row (which has pointer to overflow page 1)
    //
    // If this insert gets aborted then something like the following
    // happens:
    //     main row is marked deleted (but ptr to overflow 1 still exists)
    //     update is aborted so link on page 2 to page 1 is lost
    //     overflow row on page 2 is marked deleted
    //     overflow row on page 1 is marked deleted
    //
    // There is no way to reclaim page 2 later, as the abort of the update
    // has now lost the link from overflow page 1 to overflow page 2, so
    // the system has to do it as part of the abort of the insert. But
    // it can't for page 1, as the main page will attempt to follow
    // its link in the deleted row during its space reclamation, and it
    // can't tell the difference
    // between a row that has been marked deleted as part of an aborted
    // insert or as part of a committed delete. When it follows the link
    // it could find no page, and that could be coded against, but it could
    // be that the page is now used by some other overflow row, which would
    // lead to lots of different kinds of problems.
    //
    // So the code leaves the 1st overflow page to be cleaned up when the
    // main page row is purged, but goes ahead and immediately purges all
    // the segments that will be lost as part of the links being lost due
    // to aborted updates.
    byte after_first_page_insertFlag =
            (byte) (insertFlag | Page.INSERT_UNDO_WITH_PURGE);
    // When inserting a long column, startColumn is just used
    // as a flag: -1 means the insert is complete, != -1 indicates
    // more inserts are required.
    int startColumn = 0;
    RawTransaction t = curPage.owner.getTransaction();
    do {
        if (!isFirstPage) {
            prevPage = curPage;
            prevHandle = handle;
        }
        // step 1: get a new overflow page
        curPage = (BasePage) getNewOverflowPage();
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(curPage.isLatched());
            SanityManager.ASSERT(curPage.allowInsert());
        }
        int slot = curPage.recordCount;
        int recordId = curPage.newRecordId();
        handle = new RecordId(curPage.getPageId(), recordId, slot);
        if (isFirstPage)
            firstHandle = handle;
        // step 2: insert column portion
        startColumn = owner.getActionSet().actionInsert(
                t, curPage, slot, recordId, row, (FormatableBitSet) null,
                (LogicalUndo) null,
                (isFirstPage ? insertFlag : after_first_page_insertFlag),
                startColumn, true, -1, (DynamicByteArrayOutputStream) null,
                -1, 100);
        // step 3: update the previous piece to point to this piece,
        // then release the latch on prevPage
        if (!isFirstPage) {
            // for the previous page, add an overflow field header,
            // and update the record header to show 2 fields
            prevPage.updateFieldOverflowDetails(prevHandle, handle);
            prevPage.unlatch();
            prevPage = null;
        } else {
            isFirstPage = false;
        }
    } while (startColumn != (-1));
    if (curPage != null) {
        curPage.unlatch();
        curPage = null;
    }
    return (firstHandle);
}
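The javadoc's pseudocode can be mirrored in a few lines of ordinary Java. The sketch below is hypothetical (plain lists instead of latched overflow pages) but shows the same chaining order: each new piece is inserted first, and only then is the previous piece updated to point to it.

import java.util.ArrayList;
import java.util.List;

public class PieceChainSketch {
    static final int PIECE_SIZE = 4;  // tiny for illustration

    static class Piece {
        byte[] data;
        int next = -1;  // index of the next piece, -1 = end of chain
    }

    static int insertChain(List<Piece> pages, byte[] column) {
        int first = -1, prev = -1;
        for (int off = 0; off < column.length; off += PIECE_SIZE) {
            Piece p = new Piece();
            int len = Math.min(PIECE_SIZE, column.length - off);
            p.data = new byte[len];
            System.arraycopy(column, off, p.data, 0, len);
            pages.add(p);
            int cur = pages.size() - 1;
            if (first == -1) first = cur;     // remember the head for the caller
            else pages.get(prev).next = cur;  // link previous piece to this one
            prev = cur;
        }
        return first;  // analogous to the returned firstHandle
    }

    public static void main(String[] args) {
        List<Piece> pages = new ArrayList<Piece>();
        int head = insertChain(pages, "a rather long column value".getBytes());
        for (int i = head; i != -1; i = pages.get(i).next)
            System.out.print(new String(pages.get(i).data));
        System.out.println();
    }
}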
use of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream in project derby by apache.
the class StreamFileContainer method load.
/**
 * Load data into this container.
 * <p>
 * Populate the stream container with data from the rowSource.
 * <p>
 *
 * @param rowSource The row source to get rows to load into this container.
 *
 * @exception StandardException Standard exception policy.
 */
public void load(RowSource rowSource) throws StandardException {
    // use this output stream to buffer rows before inserting into the file
    out = new DynamicByteArrayOutputStream(bufferSize);
    logicalDataOut = new FormatIdOutputStream(out);
    boolean encrypted = dataFactory.databaseEncrypted();
    // If the database is encrypted, reserve the first
    // dataFactory.getEncryptionBlockSize() - 1 bytes; they are used to pad
    // the data when it is not dataFactory.getEncryptionBlockSize() aligned.
    if (encrypted) {
        if (zeroBytes == null)
            zeroBytes = new byte[dataFactory.getEncryptionBlockSize() - 1];
        out.write(zeroBytes, 0, dataFactory.getEncryptionBlockSize() - 1);
    }
    try {
        fileOut = privGetOutputStream(file);
        FormatableBitSet validColumns = rowSource.getValidColumns();
        Object[] row = rowSource.getNextRowFromRowSource();
        int numberFields = 0;
        if (validColumns != null) {
            for (int i = validColumns.getLength() - 1; i >= 0; i--) {
                if (validColumns.isSet(i)) {
                    numberFields = i + 1;
                    break;
                }
            }
        } else {
            numberFields = row.length;
        }
        // make the record header have record id 0
        recordHeader = new StoredRecordHeader(0, numberFields);
        // write the record header once for all the rows, directly to the
        // beginning of the file
        int rhLen = recordHeader.write(out);
        int validColumnsSize =
                validColumns == null ? 0 : validColumns.getLength();
        while (row != null) {
            int arrayPosition = -1;
            for (int i = 0; i < numberFields; i++) {
                // write each column out
                if (validColumns == null) {
                    arrayPosition++;
                    Object column = row[arrayPosition];
                    writeColumn(column);
                } else {
                    if (validColumnsSize > i && validColumns.isSet(i)) {
                        arrayPosition++;
                        Object column = row[arrayPosition];
                        writeColumn(column);
                    } else {
                        // it is a non-existent column
                        writeColumn(null);
                    }
                }
                // flush the buffer to the file when it is full, or when
                // too little room is left in the buffer
                if ((out.getUsed() >= bufferSize)
                        || ((bufferSize - out.getUsed()) < MIN_BUFFER_SIZE)) {
                    writeToFile();
                }
            }
            // get the next row and its valid columns from the rowSource
            row = rowSource.getNextRowFromRowSource();
        }
        // Write out whatever remains in the buffer. For an encrypted
        // database the buffer always holds at least the reserved
        // dataFactory.getEncryptionBlockSize() - 1 padding bytes.
        if (encrypted) {
            if (out.getUsed() > (dataFactory.getEncryptionBlockSize() - 1))
                writeToFile();
        } else if (out.getUsed() > 0) {
            writeToFile();
        }
    } catch (IOException ioe) {
        // handle IO error...
        throw StandardException.newException(
                SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
    } finally {
        close();
    }
}
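The buffer-then-flush pattern at the heart of load() can be reduced to a short, self-contained sketch. The file name, thresholds, and row encoding below are made up for illustration, and the sketch assumes DynamicByteArrayOutputStream.reset() clears the buffer between flushes, alongside the getByteArray()/getUsed() calls seen above.

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;

public class BufferedSpillSketch {
    private static final int BUFFER_SIZE = 8 * 1024;
    private static final int MIN_BUFFER_SIZE = 128;

    public static void main(String[] args) throws IOException {
        DynamicByteArrayOutputStream out =
                new DynamicByteArrayOutputStream(BUFFER_SIZE);
        try (OutputStream fileOut = new FileOutputStream("rows.dat")) {
            for (int row = 0; row < 1000; row++) {
                byte[] encodedRow = ("row-" + row).getBytes();  // stand-in for writeColumn()
                out.write(encodedRow, 0, encodedRow.length);
                // flush once the buffer is (nearly) full, as load() does
                if (out.getUsed() >= BUFFER_SIZE
                        || (BUFFER_SIZE - out.getUsed()) < MIN_BUFFER_SIZE) {
                    fileOut.write(out.getByteArray(), 0, out.getUsed());
                    out.reset();
                }
            }
            if (out.getUsed() > 0) {  // flush the tail
                fileOut.write(out.getByteArray(), 0, out.getUsed());
                out.reset();
            }
        }
    }
}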
use of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream in project derby by apache.
the class DeleteOperation method writeOptionalDataToBuffer.
/**
 * If logical undo, writes out the row that was deleted.
 *
 * @exception IOException       Can be thrown by any of the methods of ObjectOutput.
 * @exception StandardException Standard Derby policy.
 */
private void writeOptionalDataToBuffer(RawTransaction t)
        throws StandardException, IOException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(this.page != null);
    }
    DynamicByteArrayOutputStream logBuffer = t.getLogBuffer();
    int optionalDataStart = logBuffer.getPosition();
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(optionalDataStart == 0,
                "Buffer for writing the optional data should start at position 0");
    }
    if (undo != null)
        this.page.logRecord(doMeSlot, BasePage.LOG_RECORD_DEFAULT, recordId,
                (FormatableBitSet) null, logBuffer, (RecordHandle) null);
    int optionalDataLength = logBuffer.getPosition() - optionalDataStart;
    if (SanityManager.DEBUG) {
        if (optionalDataLength != logBuffer.getUsed())
            SanityManager.THROWASSERT(
                    "wrong optional data length, optionalDataLength = "
                    + optionalDataLength + ", logBuffer.getUsed() = "
                    + logBuffer.getUsed());
    }
    // set the position to the beginning of the buffer
    logBuffer.setPosition(optionalDataStart);
    this.preparedLog = new ByteArray(
            logBuffer.getByteArray(), optionalDataStart, optionalDataLength);
}
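The position bookkeeping above can be isolated into a small helper. This hypothetical sketch replaces page.logRecord() with a fixed payload but keeps the same getPosition()/setPosition()/ByteArray steps: note the start, write, measure, rewind, then wrap exactly the written region without copying.

import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;
import org.apache.derby.iapi.util.ByteArray;

public class OptionalDataSketch {
    public static ByteArray carveOptionalData(
            DynamicByteArrayOutputStream logBuffer) {
        int start = logBuffer.getPosition();  // where the optional data begins
        byte[] payload = { 1, 2, 3, 4 };      // stand-in for page.logRecord(...)
        logBuffer.write(payload, 0, payload.length);
        int length = logBuffer.getPosition() - start;
        // rewind so the next writer starts at the same place, then wrap
        // exactly the bytes just written, without copying them
        logBuffer.setPosition(start);
        return new ByteArray(logBuffer.getByteArray(), start, length);
    }
}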