use of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream in project derby by apache.
the class InsertOperation method writeOptionalDataToBuffer.
/**
* Writes out the row that is to be inserted as the optional data.
*
* @exception IOException Can be thrown by any of the methods of ObjectOutput
* @exception StandardException Standard Derby policy.
*/
private void writeOptionalDataToBuffer(RawTransaction t, DynamicByteArrayOutputStream logBuffer, Object[] row, FormatableBitSet validColumns, boolean isLongColumn, int realStartColumn, int realSpaceOnPage, int overflowThreshold) throws StandardException, IOException {
if (SanityManager.DEBUG) {
SanityManager.ASSERT(this.page != null);
}
DynamicByteArrayOutputStream localLogBuffer = null;
if (logBuffer != null) {
localLogBuffer = (DynamicByteArrayOutputStream) logBuffer;
} else {
realStartColumn = -1;
realSpaceOnPage = -1;
localLogBuffer = t.getLogBuffer();
}
if (isLongColumn) {
this.startColumn = this.page.logLongColumn(doMeSlot, recordId, row[0], localLogBuffer);
} else {
this.startColumn = this.page.logRow(doMeSlot, true, recordId, row, validColumns, localLogBuffer, this.startColumn, insertFlag, realStartColumn, realSpaceOnPage, overflowThreshold);
}
int optionalDataStart = localLogBuffer.getBeginPosition();
int optionalDataLength = localLogBuffer.getPosition() - optionalDataStart;
this.preparedLog = new ByteArray(localLogBuffer.getByteArray(), optionalDataStart, optionalDataLength);
}
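
The method above follows a prepare-then-slice pattern: log the row into a growable buffer, then describe the written region by (array, offset, length) rather than copying it. Below is a minimal sketch using java.io.ByteArrayOutputStream as a stand-in for DynamicByteArrayOutputStream; note that the JDK stream only offers a copying toByteArray(), while Derby's getByteArray() exposes the backing array directly.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Minimal sketch of the prepare-then-slice pattern: write the row into a
// growable buffer, then record the written region as (offset, length).
public class PreparedLogSketch {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream logBuffer = new ByteArrayOutputStream();
        int optionalDataStart = logBuffer.size();  // stands in for getBeginPosition()
        logBuffer.write(new byte[] { 1, 2, 3 });   // stands in for logRow(...)
        int optionalDataLength = logBuffer.size() - optionalDataStart;
        // Derby's getByteArray() returns the backing array without copying;
        // the JDK stream only offers a copying toByteArray().
        byte[] data = logBuffer.toByteArray();
        System.out.println("offset=" + optionalDataStart
                + " length=" + optionalDataLength + " total=" + data.length);
    }
}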
use of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream in project derby by apache.
the class StoredPage method storeRecordForUpdate.
private void storeRecordForUpdate(int slot, ObjectInput in) throws StandardException, IOException {
// set up to read the in-memory record header back from the record
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
StoredRecordHeader newRecorderHeader = new StoredRecordHeader();
// newRecorderHeader represents the new version of the record header.
newRecorderHeader.read(in);
int oldFieldCount = recordHeader.getNumberFields();
int newFieldCount = newRecorderHeader.getNumberFields();
int startField = recordHeader.getFirstField();
if (SanityManager.DEBUG) {
if (startField != newRecorderHeader.getFirstField())
SanityManager.THROWASSERT("First field changed from " + startField + " to " + newRecorderHeader.getFirstField());
}
// if the number of fields shrank, clear out the old data first; this case
// occurs on an update that changes the row to be overflowed.
if (newFieldCount < oldFieldCount) {
int oldDataStartingOffset = getFieldOffset(slot, startField + newFieldCount);
// calculate the length of the fields to be deleted
int deleteLength = getRecordOffset(slot) + getRecordPortionLength(slot) - oldDataStartingOffset;
// we are updating them to zero bytes!
updateRecordPortionLength(slot, -(deleteLength), deleteLength);
}
// write each field out to the page
int startingOffset = getRecordOffset(slot);
int newOffset = startingOffset;
int oldOffset = startingOffset;
// see which field gets to use the reserve space
int reservedSpaceFieldId = newFieldCount < oldFieldCount ? newFieldCount - 1 : oldFieldCount - 1;
reservedSpaceFieldId += startField;
// the new data that needs to be written at newOffset but can't be until
// unusedSpace >= newDataToWrite.length (allowing for the header)
DynamicByteArrayOutputStream newDataToWrite = null;
rawDataOut.setPosition(newOffset);
// write the record header, which may change in size
int oldLength = recordHeader.size();
int newLength = newRecorderHeader.size();
// the unused space at newOffset
int unusedSpace = oldLength;
// no fields, so we can eat into the reserve space
if (reservedSpaceFieldId < startField)
unusedSpace += getReservedCount(slot);
if (unusedSpace >= newLength) {
newRecorderHeader.write(rawDataOut);
newOffset += newLength;
unusedSpace -= newLength;
} else {
newDataToWrite = new DynamicByteArrayOutputStream(getPageSize());
newRecorderHeader.write(newDataToWrite);
}
oldOffset += oldLength;
int recordDelta = (newLength - oldLength);
int oldFieldStatus = 0;
int oldFieldDataLength = 0;
int newFieldStatus = 0;
int newFieldDataLength = 0;
int oldEndFieldExclusive = startField + oldFieldCount;
int newEndFieldExclusive = startField + newFieldCount;
for (int fieldId = startField; fieldId < newEndFieldExclusive; fieldId++) {
int oldFieldLength = 0;
if (fieldId < oldEndFieldExclusive) {
rawDataIn.setPosition(oldOffset);
oldFieldStatus = StoredFieldHeader.readStatus(rawDataIn);
oldFieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, oldFieldStatus, slotFieldSize);
oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize) + oldFieldDataLength;
}
newFieldStatus = StoredFieldHeader.readStatus(in);
newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);
// a nonexistent new field means this column was not updated, so keep the
// old field's bytes, unless the old field didn't exist.
if (StoredFieldHeader.isNonexistent(newFieldStatus) && (fieldId < oldEndFieldExclusive)) {
// may need to move this old field ...
if ((newDataToWrite == null) || (newDataToWrite.getUsed() == 0)) {
// is the old field already at the correct position?
if (newOffset == oldOffset) {
// yes, nothing to do!!
if (SanityManager.DEBUG) {
if (unusedSpace != 0)
SanityManager.THROWASSERT("Unused space is out of sync, expect 0 got " + unusedSpace);
}
} else {
// need to shift the field left
if (SanityManager.DEBUG) {
if (unusedSpace != (oldOffset - newOffset))
SanityManager.THROWASSERT("Unused space is out of sync expected " + (oldOffset - newOffset) + " got " + unusedSpace);
}
System.arraycopy(pageData, oldOffset, pageData, newOffset, oldFieldLength);
}
newOffset += oldFieldLength;
// last field to be updated can eat into the reserve space
if (fieldId == reservedSpaceFieldId)
unusedSpace += getReservedCount(slot);
} else {
// there is data still to be written, just append this field to the
// saved data
int position = newDataToWrite.getPosition();
newDataToWrite.setPosition(position + oldFieldLength);
System.arraycopy(pageData, oldOffset, newDataToWrite.getByteArray(), position, oldFieldLength);
unusedSpace += oldFieldLength;
// last field to be updated can eat into the reserve space
if (fieldId == reservedSpaceFieldId)
unusedSpace += getReservedCount(slot);
// attempt to write out some of what we have in the side buffer now.
int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
newOffset += copyLength;
unusedSpace -= copyLength;
}
oldOffset += oldFieldLength;
continue;
}
newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);
int newFieldHeaderLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize);
int newFieldLength = newFieldHeaderLength + newFieldDataLength;
recordDelta += (newFieldLength - oldFieldLength);
// See if we can write this field now
// space available increases by the amount of the old field
unusedSpace += oldFieldLength;
oldOffset += oldFieldLength;
// last field to be updated can eat into the reserve space
if (fieldId == reservedSpaceFieldId)
unusedSpace += getReservedCount(slot);
if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {
// catch up on the old data if possible
int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
newOffset += copyLength;
unusedSpace -= copyLength;
}
if (((newDataToWrite == null) || (newDataToWrite.getUsed() == 0)) && (unusedSpace >= newFieldHeaderLength)) {
// can fit the header in
rawDataOut.setPosition(newOffset);
newOffset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);
unusedSpace -= newFieldHeaderLength;
if (newFieldDataLength != 0) {
// read as much of the field as possible
int fieldCopy = unusedSpace >= newFieldDataLength ? newFieldDataLength : unusedSpace;
if (fieldCopy != 0) {
in.readFully(pageData, newOffset, fieldCopy);
newOffset += fieldCopy;
unusedSpace -= fieldCopy;
}
fieldCopy = newFieldDataLength - fieldCopy;
if (fieldCopy != 0) {
if (newDataToWrite == null)
newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);
// append the remaining portion of the field to the saved data
int position = newDataToWrite.getPosition();
newDataToWrite.setPosition(position + fieldCopy);
in.readFully(newDataToWrite.getByteArray(), position, fieldCopy);
}
}
} else {
if (newDataToWrite == null)
newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);
StoredFieldHeader.write(newDataToWrite, newFieldStatus, newFieldDataLength, slotFieldSize);
// save the new field data
if (newFieldDataLength != 0) {
int position = newDataToWrite.getPosition();
newDataToWrite.setPosition(position + newFieldDataLength);
in.readFully(newDataToWrite.getByteArray(), position, newFieldDataLength);
}
}
}
// at this point there may still be data left in the saved buffer
// that could not be fitted into the record's current space
int reservedDelta;
if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {
// need to shift the later records down ...
int nextRecordOffset = startingOffset + getTotalSpace(slot);
int spaceRequiredFromFreeSpace = newDataToWrite.getUsed() - (nextRecordOffset - newOffset);
if (SanityManager.DEBUG) {
if (newOffset > nextRecordOffset)
SanityManager.THROWASSERT("data has overwritten next record - offset " + newOffset + " next record " + nextRecordOffset);
if ((spaceRequiredFromFreeSpace <= 0) || (spaceRequiredFromFreeSpace > freeSpace))
SanityManager.THROWASSERT("invalid space required " + spaceRequiredFromFreeSpace + " newDataToWrite.getUsed() " + newDataToWrite.getUsed() + " nextRecordOffset " + nextRecordOffset + " newOffset " + newOffset + " reservedSpaceFieldId " + reservedSpaceFieldId + " startField " + startField + " newEndFieldExclusive " + newEndFieldExclusive + " newFieldCount " + newFieldCount + " oldFieldCount " + oldFieldCount + " slot " + slot + " freeSpace " + freeSpace + " unusedSpace " + unusedSpace + " page " + getPageId());
if ((getReservedCount(slot) + spaceRequiredFromFreeSpace) != recordDelta)
SanityManager.THROWASSERT("mismatch on count: reserved " + getReservedCount(slot) + " free space take " + spaceRequiredFromFreeSpace + " record delta " + recordDelta);
}
if (spaceRequiredFromFreeSpace > freeSpace) {
throw dataFactory.markCorrupt(StandardException.newException(SQLState.DATA_CORRUPT_PAGE, getPageId()));
}
// see if this is the last record on the page; if so, a simple
// shift of the remaining fields will suffice...
expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);
unusedSpace += spaceRequiredFromFreeSpace;
moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
reservedDelta = -1 * getReservedCount(slot);
if (SanityManager.DEBUG) {
if (newDataToWrite.getUsed() != 0)
SanityManager.THROWASSERT("data is left in save buffer ... " + newDataToWrite.getUsed());
}
} else {
reservedDelta = -1 * recordDelta;
}
// now reset the length in the slot entry
updateRecordPortionLength(slot, recordDelta, reservedDelta);
setHeaderAtSlot(slot, newRecorderHeader);
}
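
The central technique in storeRecordForUpdate is the newDataToWrite side buffer: bytes that cannot yet be placed at newOffset, because old data still occupies that part of the page, are staged in a DynamicByteArrayOutputStream and drained as old fields are consumed. Below is a minimal sketch of the drain step, modeled loosely on the moveSavedDataToPage calls above; the helper is a hypothetical stand-in, and Derby's version additionally consumes the copied bytes from the buffer.

import java.util.Arrays;

// Sketch of draining a staged side buffer into freed page space.
// This hypothetical helper just reports how much was copied; the caller
// advances newOffset and shrinks unusedSpace by the return value.
public class SideBufferSketch {

    static int moveSavedDataToPage(byte[] saved, int savedUsed,
                                   byte[] pageData, int newOffset, int unusedSpace) {
        // copy as much staged data as the current gap on the page allows
        int copyLength = Math.min(savedUsed, unusedSpace);
        System.arraycopy(saved, 0, pageData, newOffset, copyLength);
        return copyLength;
    }

    public static void main(String[] args) {
        byte[] pageData = new byte[16];
        byte[] saved = { 7, 8, 9, 10 };
        int copied = moveSavedDataToPage(saved, saved.length, pageData, 4, 3);
        System.out.println("copied=" + copied + " page=" + Arrays.toString(pageData));
    }
}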
use of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream in project derby by apache.
the class StoredPage method handleIncompleteLogRow.
/**
* Handle an update of a record portion that is incomplete.
* <p>
* Handle an update of a record portion that is incomplete.
* I.e. columns have expanded, requiring other columns to move
* off the page into a new portion.
* <P>
* This method works out which of the columns that need to be moved are
* not being updated, and makes a copy of their data. It then throws an
* exception with this data, much like the long column exception which will
* then allow the original insert to complete.
* <P>
* If no columns need to be saved (i.e. all the ones that would move are
* being updated) then no exception is thrown, logRow() will return, and the
* update completes normally.
* <p>
*
* @param slot slot of the current update.
* @param startColumn column to start at, handles start in middle of row
* @param columnList bit map indicating which columns are being updated.
* @param out place to log to.
*
* @exception StandardException Standard exception policy.
*/
private void handleIncompleteLogRow(int slot, int startColumn, FormatableBitSet columnList, DynamicByteArrayOutputStream out) throws StandardException {
if (SanityManager.DEBUG)
SanityManager.ASSERT(columnList != null);
StoredRecordHeader rh = getHeaderAtSlot(slot);
int endFieldExclusive = rh.getFirstField() + rh.getNumberFields();
// first see if any fields are not being modified
boolean needSave = false;
int columnListSize = columnList.size();
for (int i = startColumn; i < endFieldExclusive; i++) {
if (!(columnListSize > i && columnList.get(i))) {
needSave = true;
break;
}
}
if (!needSave)
return;
Object[] savedFields = new Object[endFieldExclusive - startColumn];
ByteArrayOutputStream fieldStream = null;
for (int i = startColumn; i < endFieldExclusive; i++) {
// column is being updated - ignore
if (columnListSize > i && columnList.get(i))
continue;
try {
// log the field in its raw format so that we preserve the state of
// the field header.
if (fieldStream == null)
fieldStream = new ByteArrayOutputStream();
else
fieldStream.reset();
logField(slot, i, fieldStream);
savedFields[i - startColumn] = new RawField(fieldStream.toByteArray());
} catch (IOException ioe) {
throw dataFactory.markCorrupt(StandardException.newException(SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
}
}
// Use a long column exception to notify the caller of the need
// to perform an insert of the columns that need to move
LongColumnException lce = new LongColumnException();
lce.setExceptionInfo(out, startColumn, -1);
lce.setColumn(savedFields);
throw lce;
}
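
The control flow here is exception-as-callback: instead of returning a status, the method packages the raw bytes of every column that must move but is not being updated, then throws so that the caller can re-insert those columns on an overflow page. Below is a reduced sketch of that pattern; the class and method names are hypothetical stand-ins, not Derby's.

import java.io.ByteArrayOutputStream;

// Hypothetical carrier for the saved column bytes, in the spirit of
// LongColumnException above.
class FieldsMovedException extends Exception {
    final byte[][] savedFields;
    FieldsMovedException(byte[][] savedFields) { this.savedFields = savedFields; }
}

public class IncompleteRowSketch {
    // Collect raw copies of the columns that must move off the page and
    // are not carried in the update, then signal the caller by throwing.
    static void saveUnmodifiedFields(byte[][] fields, boolean[] updated)
            throws FieldsMovedException {
        boolean needSave = false;
        byte[][] saved = new byte[fields.length][];
        ByteArrayOutputStream fieldStream = new ByteArrayOutputStream();
        for (int i = 0; i < fields.length; i++) {
            if (updated[i]) continue;  // updated columns carry their new value
            fieldStream.reset();       // reuse one stream, as the code above does
            fieldStream.write(fields[i], 0, fields[i].length);
            saved[i] = fieldStream.toByteArray();
            needSave = true;
        }
        if (needSave) throw new FieldsMovedException(saved);
    }

    public static void main(String[] args) {
        try {
            saveUnmodifiedFields(new byte[][] { { 1 }, { 2, 3 } },
                                 new boolean[] { true, false });
        } catch (FieldsMovedException e) {
            System.out.println("columns to move: " + e.savedFields.length);
        }
    }
}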
use of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream in project derby by apache.
the class StoredPage method doUpdateAtSlot.
/**
* Perform an update.
*
* @exception StandardException Standard Derby policy
*/
public void doUpdateAtSlot(RawTransaction t, int slot, int id, Object[] row, FormatableBitSet validColumns) throws StandardException {
// If this is a head page, the recordHandle is the head row handle.
// If this is not a head page, we are calling updateAtSlot inside some
// convoluted loop that updates an overflow chain. There is nothing we
// can do about it anyway.
RecordHandle headRowHandle = isOverflowPage() ? null : getRecordHandleAtSlot(slot);
// RESOLVE: djd/yyz what does a null row mean? (sku)
if (row == null) {
owner.getActionSet().actionUpdate(t, this, slot, id, row, validColumns, -1, (DynamicByteArrayOutputStream) null, -1, headRowHandle);
return;
}
// startColumn is the first column to be updated.
int startColumn = RowUtil.nextColumn(row, validColumns, 0);
if (startColumn == -1)
return;
if (SanityManager.DEBUG) {
// check that no more bits are set in validColumns than there are
// columns passed in via the row array.
if (!isOverflowPage() && validColumns != null) {
if (RowUtil.getNumberOfColumns(-1, validColumns) > row.length)
SanityManager.THROWASSERT("updating slot " + slot + " on page " + getIdentity() + " " + RowUtil.getNumberOfColumns(-1, validColumns) + " bits are set in validColumns but only " + row.length + " columns in row[]");
}
}
// Keep track of row shrinkage in the head row piece. If any row piece
// shrinks, file post commit work to clear all reserved space for the
// entire row chain.
boolean rowHasReservedSpace = false;
StoredPage curPage = this;
for (; ; ) {
StoredRecordHeader rh = curPage.getHeaderAtSlot(slot);
int startField = rh.getFirstField();
int endFieldExclusive = startField + rh.getNumberFields();
// curPage contains column[startField] to column[endFieldExclusive-1]
// Need to cope with an update that is increasing the number of
// columns. If this occurs we want to make sure that we perform a
// single update to the last portion of a record, and not an update
// of the current columns and then an update to append a column.
long nextPage = -1;
int realStartColumn = -1;
int realSpaceOnPage = -1;
if (!rh.hasOverflow() || ((startColumn >= startField) && (startColumn < endFieldExclusive))) {
boolean hitLongColumn;
int nextColumn = -1;
Object[] savedFields = null;
DynamicByteArrayOutputStream logBuffer = null;
do {
try {
// Update this portion of the record.
// Pass in headRowHandle in case we are to update any
// long column and they need to be cleaned up by post
// commit processing. We don't want to purge the
// columns right now because in order to reclaim the
// page, we need to remove them. But it would be bad
// to remove them now because the transaction may not
// commit for a long time. We can do both purging of
// the long column and page removal together in the
// post commit.
nextColumn = owner.getActionSet().actionUpdate(t, curPage, slot, id, row, validColumns, realStartColumn, logBuffer, realSpaceOnPage, headRowHandle);
hitLongColumn = false;
} catch (LongColumnException lce) {
if (lce.getRealSpaceOnPage() == -1) {
// an update that has caused the row to increase
// in size *and* push some fields off the page
// that need to be inserted in an overflow page
// no need to make a copy as we are going to use
// this buffer right away
logBuffer = lce.getLogBuffer();
savedFields = (Object[]) lce.getColumn();
realStartColumn = lce.getNextColumn();
realSpaceOnPage = -1;
hitLongColumn = true;
continue;
}
// we caught a real long column exception
// three things should happen here:
// 1. insert the long column into overflow pages.
// 2. append the overflow field header in the main chain.
// 3. continue the update in the main data chain.
logBuffer = new DynamicByteArrayOutputStream(lce.getLogBuffer());
// step 1: insert the long column ... if this update
// operation rolls back, purge the after image column
// chain and reclaim the overflow page because the
// whole chain will be orphaned anyway.
RecordHandle longColumnHandle = insertLongColumn(curPage, lce, Page.INSERT_UNDO_WITH_PURGE);
// step 2: append overflow field header to log buffer
int overflowFieldLen = 0;
try {
overflowFieldLen += appendOverflowFieldHeader(logBuffer, longColumnHandle);
} catch (IOException ioe) {
throw StandardException.newException(SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
}
// step 3: continue the insert in the main data chain
// need to pass the log buffer, and start column to the
// next insert.
realStartColumn = lce.getNextColumn() + 1;
realSpaceOnPage = lce.getRealSpaceOnPage() - overflowFieldLen;
hitLongColumn = true;
} catch (NoSpaceOnPage nsop) {
throw StandardException.newException(SQLState.DATA_UNEXPECTED_NO_SPACE_ON_PAGE, nsop, ((PageKey) curPage.getIdentity()).toString(), getPageDumpString(), slot, id, validColumns.toString(), realStartColumn, 0, headRowHandle);
}
} while (hitLongColumn);
// See if we completed all the columns that are on this page.
int validColumnsSize = (validColumns == null) ? 0 : validColumns.getLength();
if (nextColumn != -1) {
if (SanityManager.DEBUG) {
if ((nextColumn < startField) || (rh.hasOverflow() && (nextColumn >= endFieldExclusive))) {
SanityManager.THROWASSERT("nextColumn out of range = " + nextColumn + " expected between " + startField + " and " + endFieldExclusive);
}
}
// Need to insert the columns from nextColumn to endFieldExclusive
// onto a new overflow page.
// If the column is not being updated we
// pick it up from the current page. If it is being updated
// we take it from the new value.
int possibleLastFieldExclusive = endFieldExclusive;
if (!rh.hasOverflow()) {
// we might be adding a field here
if (validColumns == null) {
if (row.length > possibleLastFieldExclusive)
possibleLastFieldExclusive = row.length;
} else {
if (validColumnsSize > possibleLastFieldExclusive)
possibleLastFieldExclusive = validColumnsSize;
}
}
// use a sparse row
Object[] newRow = new Object[possibleLastFieldExclusive];
FormatableBitSet newColumnList = new FormatableBitSet(possibleLastFieldExclusive);
ByteArrayOutputStream fieldStream = null;
for (int i = nextColumn; i < possibleLastFieldExclusive; i++) {
if ((validColumns == null) || (validColumnsSize > i && validColumns.isSet(i))) {
newColumnList.set(i);
// use the new value
newRow[i] = RowUtil.getColumn(row, validColumns, i);
} else if (i < endFieldExclusive) {
newColumnList.set(i);
// use the old value
newRow[i] = savedFields[i - nextColumn];
}
}
RecordHandle handle = curPage.getRecordHandleAtSlot(slot);
// there cannot be any updates to do.
if (rh.hasOverflow()) {
// We have to carry across the overflow information
// from the current record, if any.
nextPage = rh.getOverflowPage();
id = rh.getOverflowId();
// find the next starting column before unlatching page
startColumn = RowUtil.nextColumn(row, validColumns, endFieldExclusive);
} else {
startColumn = -1;
nextPage = 0;
}
// Don't bother with temp container.
if (!rowHasReservedSpace && headRowHandle != null && curPage != null && !owner.isTemporaryContainer()) {
rowHasReservedSpace = curPage.checkRowReservedSpace(slot);
}
// insert the record portion on a new overflow page at slot
// 0 this will automatically handle any overflows in
// this new portion
// BasePage op = getNewOverflowPage();
BasePage op = curPage.getOverflowPageForInsert(slot, newRow, newColumnList, nextColumn);
// We have all the information from this page so unlatch it
if (curPage != this) {
curPage.unlatch();
curPage = null;
}
byte mode = Page.INSERT_OVERFLOW;
if (nextPage != 0)
mode |= Page.INSERT_FOR_SPLIT;
RecordHandle nextPortionHandle = nextPage == 0 ? null : owner.makeRecordHandle(nextPage, id);
// RESOLVED (sku): even though we would like to roll back
// these inserts with PURGE rather than with delete,
// we have to delete because if we purge the last row
// from an overflow page, the purge will queue a post
// commit to remove the page.
// While this is OK with long columns, we cannot do this
// for long rows because long row overflow pages can be
// shared by more than one long row, and thus it is unsafe
// to remove the page without first latching the head page.
// However, the insert log record does not have the head
// row's page number, so the rollback cannot put that
// information into the post commit work.
RecordHandle portionHandle;
try {
portionHandle = op.insertAllowOverflow(0, newRow, newColumnList, nextColumn, mode, 100, nextPortionHandle);
} catch (NoSpaceOnPage nsop) {
throw StandardException.newException(SQLState.DATA_UNEXPECTED_NO_SPACE_ON_PAGE, nsop, ((PageKey) op.getIdentity()).toString(), getPageDumpString(), slot, id, newColumnList.toString(), nextColumn, mode, nextPortionHandle);
}
// Update the previous record header to point to new portion
if (curPage == this)
updateOverflowDetails(this, handle, portionHandle);
else
updateOverflowDetails(handle, portionHandle);
op.unlatch();
} else {
// See earlier comments on checking row reserved space.
if (!rowHasReservedSpace && headRowHandle != null && curPage != null && !owner.isTemporaryContainer()) {
rowHasReservedSpace = curPage.checkRowReservedSpace(slot);
}
// find the next starting column before we unlatch the page
startColumn = rh.hasOverflow() ? RowUtil.nextColumn(row, validColumns, endFieldExclusive) : -1;
}
// have we completed this update?
if (startColumn == -1) {
if ((curPage != this) && (curPage != null))
curPage.unlatch();
// break out of the for loop
break;
}
}
if (nextPage == -1) {
if (SanityManager.DEBUG) {
SanityManager.ASSERT(curPage != null, "Current page is null but no overflow information has been obtained");
}
// Get the next page info while we still have the page
// latched.
nextPage = rh.getOverflowPage();
id = rh.getOverflowId();
}
if ((curPage != this) && (curPage != null))
curPage.unlatch();
// get the next portion page and find the correct slot
curPage = (StoredPage) owner.getPage(nextPage);
if (SanityManager.DEBUG) {
SanityManager.ASSERT(curPage.isOverflowPage(), "following row chain gets a non-overflow page");
}
slot = curPage.findRecordById(id, FIRST_SLOT_NUMBER);
}
// if any row piece shrank, file post commit work to reclaim the reserved
// space for the entire row chain.
if (rowHasReservedSpace) {
RawTransaction rxact = (RawTransaction) owner.getTransaction();
ReclaimSpace work = new ReclaimSpace(ReclaimSpace.ROW_RESERVE, headRowHandle, rxact.getDataFactory(), true);
rxact.addPostCommitWork(work);
}
}
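
Stripped of the long-column and reserved-space handling, doUpdateAtSlot is a linked-list walk over row pieces: update the piece on the current page, capture the overflow link while the page is still latched, then follow it. Below is a skeletal sketch of that walk; Portion and Pages are hypothetical stand-ins for Derby's page and record machinery.

// Skeleton of the row-piece chain walk in doUpdateAtSlot.
public class ChainWalkSketch {
    static final class Portion {
        final long overflowPage;  // 0 means this is the last piece
        final int overflowId;
        Portion(long overflowPage, int overflowId) {
            this.overflowPage = overflowPage;
            this.overflowId = overflowId;
        }
    }

    interface Pages {
        Portion latch(long pageNumber, int recordId);  // fetch and "latch" a piece
    }

    static void updateChain(Pages pages, long headPage, int headId) {
        long nextPage = headPage;
        int id = headId;
        while (true) {
            Portion piece = pages.latch(nextPage, id);
            // ... update the columns stored in this piece ...
            if (piece.overflowPage == 0) {
                break;  // last piece: the update is complete
            }
            // capture the link before moving on (before unlatching, in Derby)
            nextPage = piece.overflowPage;
            id = piece.overflowId;
        }
    }
}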
use of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream in project derby by apache.
the class DDMWriter method writeUDT.
/**
* Write a value of a user defined type.
*
* @param val object to be written
*
* @exception DRDAProtocolException
*/
protected void writeUDT(Object val, int index) throws DRDAProtocolException {
// should not be called if val is null
if (SanityManager.DEBUG) {
if (val == null) {
SanityManager.THROWASSERT("UDT is null");
}
}
byte[] buffer = null;
int length = 0;
try {
DynamicByteArrayOutputStream dbaos = new DynamicByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(dbaos);
oos.writeObject(val);
buffer = dbaos.getByteArray();
length = dbaos.getUsed();
} catch (IOException e) {
agent.markCommunicationsFailure(e, "DDMWriter.writeUDT()", "", e.getMessage(), "");
}
if (length > DRDAConstants.MAX_DRDA_UDT_SIZE) {
agent.markCommunicationsFailure("DDMWriter.writeUDT()", "User defined type is longer than " + DRDAConstants.MAX_DRDA_UDT_SIZE + " bytes.", "", "");
} else {
writeShort(length);
writeBytes(buffer, 0, length);
}
}
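
The serialize, measure, then check pattern in writeUDT is easy to reproduce with standard JDK classes. Below is a minimal sketch; it uses java.io.ByteArrayOutputStream (which copies on toByteArray(), unlike Derby's zero-copy getByteArray()) and an assumed 32767-byte limit in place of DRDAConstants.MAX_DRDA_UDT_SIZE.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable;

// Sketch: serialize a user-defined object, then enforce a wire-format
// length limit before it is sent as a two-byte length plus payload.
public class UdtWriteSketch {
    // assumed limit; Derby uses DRDAConstants.MAX_DRDA_UDT_SIZE
    static final int MAX_UDT_SIZE = 32767;

    static byte[] encodeUdt(Serializable val) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bytes)) {
            oos.writeObject(val);
        }
        byte[] payload = bytes.toByteArray();
        if (payload.length > MAX_UDT_SIZE) {
            throw new IOException("UDT longer than " + MAX_UDT_SIZE + " bytes");
        }
        return payload;
    }

    public static void main(String[] args) throws IOException {
        byte[] encoded = encodeUdt("example value");
        System.out.println("length=" + encoded.length);
    }
}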