Use of org.apache.derby.iapi.services.io.StreamStorable in project derby by apache.
The class StreamFileContainer, method writeColumn.
private void writeColumn(Object column) throws StandardException, IOException {
    int fieldStatus = FIELD_STATUS;
    if (column == null) {
        // just write a non-existent header.
        fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
        StoredFieldHeader.write(out, fieldStatus, 0, LARGE_SLOT_SIZE);
        return;
    }
    // if the column is a null column, write the field header now.
    if (column instanceof Storable) {
        Storable sColumn = (Storable) column;
        if (sColumn.isNull()) {
            fieldStatus = StoredFieldHeader.setNull(fieldStatus, true);
            StoredFieldHeader.write(out, fieldStatus, 0, LARGE_SLOT_SIZE);
            return;
        }
    }
    int beginPosition = out.getPosition();
    int fieldDataLength = 0;
    // write out the header, mostly to reserve the space
    StoredFieldHeader.write(out, fieldStatus, fieldDataLength, LARGE_SLOT_SIZE);
    if (column instanceof StreamStorable) {
        if (((StreamStorable) column).returnStream() != null) {
            column = ((StreamStorable) column).returnStream();
        }
    }
    if (column instanceof InputStream) {
        InputStream inColumn = (InputStream) column;
        // Set a reasonable buffer size.
        // To avoid extremely inefficient reads, and an infinite loop when
        // InputStream.available() returns zero, a lower limit is set on
        // the buffer size. To avoid using too much memory (especially in
        // multi-user environments) an upper limit is set as well.
        // The limits can be tuned, but note that a too high default or
        // lower limit can put unnecessary pressure on the memory
        // sub-system and the GC process.
        int bufferLen = Math.min(Math.max(inColumn.available(), 64), 8192);
        byte[] bufData = new byte[bufferLen];
        int lenRead;
        while ((lenRead = inColumn.read(bufData)) != -1) {
            fieldDataLength += lenRead;
            out.write(bufData, 0, lenRead);
        }
    } else if (column instanceof Storable) {
        Storable sColumn = (Storable) column;
        // write field data to the stream, we already handled the null case
        sColumn.writeExternal(logicalDataOut);
        fieldDataLength = out.getPosition() - beginPosition - FIELD_HEADER_SIZE;
    } else {
        // Serializable/Externalizable/Formattable
        // all look the same at this point.
        logicalDataOut.writeObject(column);
        fieldDataLength = out.getPosition() - beginPosition - FIELD_HEADER_SIZE;
    }
    // Now we go back to update the fieldDataLength in the field header
    int endPosition = out.getPosition();
    out.setPosition(beginPosition);
    StoredFieldHeader.write(out, fieldStatus, fieldDataLength, LARGE_SLOT_SIZE);
    // set position to the end of the field
    if (!StoredFieldHeader.isNull(fieldStatus))
        out.setPosition(endPosition);
}
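The interesting detail above is the buffered copy loop: the buffer is sized from InputStream.available(), clamped into the range [64, 8192] to guard against both a zero-length buffer (available() may return 0) and oversized allocations. Below is a minimal standalone sketch of the same pattern, assuming nothing beyond the JDK; StreamCopySketch and copyStream are illustrative names, not Derby APIs.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

// Illustrative only: the clamped-buffer copy pattern used by writeColumn,
// expressed with plain JDK streams.
public class StreamCopySketch {

    static long copyStream(InputStream in, OutputStream out) throws IOException {
        // Clamp available() into [64, 8192]: the floor avoids tiny reads and a
        // zero-length buffer; the ceiling caps per-copy memory usage.
        int bufferLen = Math.min(Math.max(in.available(), 64), 8192);
        byte[] buf = new byte[bufferLen];
        long total = 0;
        int lenRead;
        while ((lenRead = in.read(buf)) != -1) {
            total += lenRead;
            out.write(buf, 0, lenRead);
        }
        return total;
    }

    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[100_000]);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        System.out.println(copyStream(in, out) + " bytes copied");
    }
}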
Use of org.apache.derby.iapi.services.io.StreamStorable in project derby by apache.
The class UpdateResultSet, method checkStreamCols.
/* The following two methods check for, and guard against, the case where a
 * single un-objectified stream would be inserted into two temp-table rows
 * for a deferred update. That would fail when the stream is written to disk
 * a second time. In all other cases we avoid objectifying the stream
 * unnecessarily. beetle 4896.
 */
private FormatableBitSet checkStreamCols() {
    DataValueDescriptor[] cols = row.getRowArray();
    FormatableBitSet streamCols = null;
    for (int i = 0; i < numberOfBaseColumns; i++) {
        // check new values
        if (cols[i + numberOfBaseColumns] instanceof StreamStorable) {
            if (streamCols == null)
                streamCols = new FormatableBitSet(numberOfBaseColumns);
            streamCols.set(i);
        }
    }
    return streamCols;
}
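For illustration, here is the same "record which columns are stream-backed" idea as a small sketch using java.util.BitSet in place of Derby's FormatableBitSet. The Column and StreamColumn types are hypothetical stand-ins for DataValueDescriptor and StreamStorable; only the bitmap-building pattern is the point.

import java.util.BitSet;

// Illustrative only: lazily build a bitmap of stream-backed columns,
// returning null when no column needs special handling.
public class StreamColsSketch {

    interface Column {}
    static class StreamColumn implements Column {}  // stands in for StreamStorable
    static class PlainColumn implements Column {}

    static BitSet checkStreamCols(Column[] newValues) {
        BitSet streamCols = null;
        for (int i = 0; i < newValues.length; i++) {
            if (newValues[i] instanceof StreamColumn) {
                if (streamCols == null)
                    streamCols = new BitSet(newValues.length);
                streamCols.set(i);
            }
        }
        return streamCols;
    }

    public static void main(String[] args) {
        Column[] row = { new PlainColumn(), new StreamColumn(), new PlainColumn() };
        System.out.println("stream-backed columns: " + checkStreamCols(row));  // {1}
    }
}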
Use of org.apache.derby.iapi.services.io.StreamStorable in project derby by apache.
The class MergeResultSet, method collectAffectedRows.
/**
* <p>
* Loop through the rows in the driving left join.
* </p>
*/
boolean collectAffectedRows() throws StandardException {
    DataValueDescriptor rlColumn;
    boolean rowsFound = false;
    while (true) {
        // may need to objectify stream columns here.
        // see DMLWriteResultSet.getNextRowCore(NoPutResultSet)
        _row = _drivingLeftJoin.getNextRowCore();
        if (_row == null) {
            break;
        }
        // By convention, the last column of the driving left join contains a
        // data value holding the RowLocation of the target row.
        rowsFound = true;
        rlColumn = _row.getColumn(_row.nColumns());
        SQLRef baseRowLocation = null;
        boolean matched = false;
        if (rlColumn != null && !rlColumn.isNull()) {
            matched = true;
            // change the HeapRowLocation into a SQLRef, something which the
            // temporary table can (de)serialize correctly
            baseRowLocation = new SQLRef((RowLocation) rlColumn.getObject());
            _row.setColumn(_row.nColumns(), baseRowLocation);
        }
        // find the first clause which applies to this row
        MatchingClauseConstantAction matchingClause = null;
        int clauseCount = _constants.matchingClauseCount();
        int clauseIdx = 0;
        for (; clauseIdx < clauseCount; clauseIdx++) {
            MatchingClauseConstantAction candidate = _constants.getMatchingClause(clauseIdx);
            boolean isWhenMatchedClause = false;
            switch (candidate.clauseType()) {
                case ConstantAction.WHEN_MATCHED_THEN_UPDATE:
                case ConstantAction.WHEN_MATCHED_THEN_DELETE:
                    isWhenMatchedClause = true;
                    break;
            }
            boolean considerClause = (matched == isWhenMatchedClause);
            if (considerClause && candidate.evaluateRefinementClause(activation)) {
                matchingClause = candidate;
                break;
            }
        }
        if (matchingClause != null) {
            // this will raise an exception if the row is being touched more than once
            if (baseRowLocation != null) {
                addSubjectRow(baseRowLocation);
            }
            // Clone any stream-backed values so that buffering the row does
            // not consume a stream which could still be needed later.
            for (int i = 0; i < _row.nColumns(); i++) {
                DataValueDescriptor dvd = _row.getColumn(i + 1);
                if (dvd instanceof StreamStorable && dvd.hasStream()) {
                    _row.setColumn(i + 1, dvd.cloneValue(true));
                }
            }
            _thenRows[clauseIdx] = matchingClause.bufferThenRow(activation, _thenRows[clauseIdx], _row);
            _rowCount++;
        }
    }
    return rowsFound;
}
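The cloning step near the end is the key stream-handling detail: a stream-backed value can be consumed only once, so a copy destined for a buffered row must be materialized first (the boolean argument to cloneValue requests materialization). Here is a hedged sketch of that idea with a hypothetical StreamValue type; none of these names are Derby's.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

// Illustrative only: why a stream-backed value is materialized before a
// copy of it is buffered in a temp row.
public class CloneBeforeBufferSketch {

    static class StreamValue {
        private InputStream stream;   // one-shot source, readable only once
        private byte[] materialized;  // set once the stream has been drained

        StreamValue(InputStream stream) { this.stream = stream; }
        StreamValue(byte[] data) { this.materialized = data; }

        boolean hasStream() { return stream != null; }

        // Drain the stream into memory so the copy can be re-read safely.
        StreamValue cloneValue() throws IOException {
            if (!hasStream()) return new StreamValue(materialized.clone());
            byte[] data = stream.readAllBytes();  // consumes the one-shot stream
            stream = null;
            materialized = data;
            return new StreamValue(data.clone());
        }
    }

    public static void main(String[] args) throws IOException {
        StreamValue v = new StreamValue(new ByteArrayInputStream("blob".getBytes()));
        StreamValue buffered = v.cloneValue();  // safe to store in a temp row
        System.out.println(buffered.materialized.length + " bytes buffered");
    }
}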
Use of org.apache.derby.iapi.services.io.StreamStorable in project derby by apache.
The class DMLWriteResultSet, method objectifyStreams.
private void objectifyStreams(ExecRow row) throws StandardException {
    // Objectify any stream-backed heap columns so that the resulting
    // objects can be used for multiple rows.
    if ((row != null) && (streamStorableHeapColIds != null)) {
        for (int ix = 0; ix < streamStorableHeapColIds.length; ix++) {
            int heapIx = streamStorableHeapColIds[ix];
            int readIx = (baseRowReadMap == null) ? heapIx : baseRowReadMap[heapIx];
            DataValueDescriptor col = row.getColumn(readIx + 1);
            // DERBY-4779: several columns may share one underlying stream.
            if (col != null) {
                InputStream stream = ((StreamStorable) col).returnStream();
                ((StreamStorable) col).loadStream();
                // If any other column referenced the same (now-drained)
                // stream, replace it with a clone of the loaded value.
                if (stream != null) {
                    for (int i = 1; i <= row.nColumns(); i++) {
                        DataValueDescriptor c = row.getColumn(i);
                        if (c instanceof StreamStorable
                                && ((StreamStorable) c).returnStream() == stream) {
                            row.setColumn(i, col.cloneValue(false));
                        }
                    }
                }
            }
        }
    }
}
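The inner loop exists because several columns of one row can alias the same underlying stream (DERBY-4779): once the first column drains the stream, the aliases would read nothing. Below is a minimal sketch of the hazard and the fix, with a hypothetical Slot type standing in for a stream-backed column.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

// Illustrative only: materialize a shared one-shot stream once, then repair
// every other slot that aliased it.
public class SharedStreamSketch {

    static class Slot {
        InputStream stream;  // non-null while still stream-backed
        byte[] value;        // non-null once materialized

        Slot(InputStream s) { stream = s; }

        void load() throws IOException {  // "objectify": drain into memory
            if (stream != null) {
                value = stream.readAllBytes();
                stream = null;
            }
        }
    }

    static void objectify(Slot[] row) throws IOException {
        for (Slot col : row) {
            InputStream s = col.stream;  // remember the source before draining
            col.load();
            if (s == null) continue;
            // Any other slot aliasing the drained stream gets a copy of the
            // materialized value instead of a dead stream.
            for (Slot other : row) {
                if (other != col && other.stream == s) {
                    other.stream = null;
                    other.value = col.value.clone();
                }
            }
        }
    }

    public static void main(String[] args) throws IOException {
        InputStream shared = new ByteArrayInputStream("lob".getBytes());
        Slot a = new Slot(shared), b = new Slot(shared);  // two columns, one stream
        objectify(new Slot[] { a, b });
        System.out.println(Arrays.equals(a.value, b.value));  // true
    }
}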
Use of org.apache.derby.iapi.services.io.StreamStorable in project derby by apache.
The class StoredPage, method readRecordFromStream.
/**
 * Restore a record from a stream.
 * <p>
 * The rawDataIn stream is expected to be positioned after the record
 * header.
 *
 * @return true if all requested columns (up to max_colid) were read
 *         from this row portion; false if the caller must continue
 *         with the row's overflow portion.
 *
 * @param row          restore row into this object array.
 * @param max_colid    The maximum numbered column id that will be
 *                     requested by the caller. It should be:
 *                     min(row.length - 1, maximum bit set in vCols)
 *                     It is used to stop the innermost loop from
 *                     looking at more columns in the row.
 * @param vCols        If not null, bit map indicates valid cols.
 * @param mCols        If not null, int array indicates columns already
 *                     read in from the stream. A non-zero entry
 *                     means the column has already been read in.
 * @param dataIn       restore row from this stream.
 * @param recordHeader The record header of the row; it was read in
 *                     from the stream and dataIn is positioned after it.
 * @param recordToLock The head row to use for locking, used to lock
 *                     the head row of overflow columns/rows.
 *
 * @exception StandardException Standard exception policy.
 */
private final boolean readRecordFromStream(Object[] row, int max_colid, int[] vCols, int[] mCols, LimitObjectInput dataIn, StoredRecordHeader recordHeader, RecordHandle recordToLock) throws StandardException, IOException {
    ErrorObjectInput inUserCode = null;
    try {
        // Get the number of columns in the row.
        int numberFields = recordHeader.getNumberFields();
        int startColumn = recordHeader.getFirstField();
        if (startColumn > max_colid) {
            // done if the startColumn is higher than highest column.
            return true;
        }
        // For each column in the row, restore the column from
        // the corresponding field in the record. If the field
        // is missing or not set, set the column to null.
        int highestColumnOnPage = numberFields + startColumn;
        int vColsSize = (vCols == null) ? 0 : vCols.length;
        for (int columnId = startColumn; columnId <= max_colid; columnId++) {
            // Skip columns that were not requested (per vCols) or
            // that have already been read (per mCols).
            if (((vCols != null) && (!(vColsSize > columnId && (vCols[columnId] != 0)))) ||
                ((mCols != null) && (mCols[columnId] != 0))) {
                if (columnId < highestColumnOnPage) {
                    // If the field exists in the row on the page, but the
                    // partial row being returned does not include it,
                    // skip the field ...
                    skipField(dataIn);
                }
                continue;
            }
            // The requested column lies beyond the highest field
            // that this record has.
            if (columnId >= highestColumnOnPage) {
                // field is non-existent
                Object column = row[columnId];
                if (column instanceof DataValueDescriptor) {
                    // RESOLVE - This is in place for 1.2. In the future
                    // we may want to return this column as non-existent
                    // even if it is a storable column, or maybe use a
                    // supplied default.
                    ((DataValueDescriptor) column).restoreToNull();
                } else {
                    row[columnId] = null;
                }
                continue;
            }
            // read the field header
            int fieldStatus = StoredFieldHeader.readStatus(dataIn);
            int fieldDataLength = StoredFieldHeader.readFieldDataLength(dataIn, fieldStatus, slotFieldSize);
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(!StoredFieldHeader.isExtensible(fieldStatus), "extensible fields not supported yet");
            }
            Object column = row[columnId];
            OverflowInputStream overflowIn = null;
            // field is non-existent, return null
            if (StoredFieldHeader.isNonexistent(fieldStatus)) {
                if (column instanceof DataValueDescriptor) {
                    // RESOLVE - This is in place for 1.2. In the future
                    // we may want to return this column as non-existent
                    // even if it is a storable column, or maybe use a
                    // supplied default.
                    ((DataValueDescriptor) column).restoreToNull();
                } else {
                    row[columnId] = null;
                }
                continue;
            }
            boolean isOverflow = StoredFieldHeader.isOverflow(fieldStatus);
            if (isOverflow) {
                // A fetched long column needs to be returned as a stream.
                long overflowPage = CompressedNumber.readLong((InputStream) dataIn);
                int overflowId = CompressedNumber.readInt((InputStream) dataIn);
                // Prepare the stream for results...
                // Create the byteHolder the size of a page, so that it
                // will fit the field data that would fit on a page.
                MemByteHolder byteHolder = new MemByteHolder(pageData.length);
                overflowIn = new OverflowInputStream(byteHolder, owner, overflowPage, overflowId, recordToLock);
            }
            // Deal with Object columns
            if (column instanceof DataValueDescriptor) {
                DataValueDescriptor sColumn = (DataValueDescriptor) column;
                // is the column null ?
                if (StoredFieldHeader.isNull(fieldStatus)) {
                    sColumn.restoreToNull();
                    continue;
                }
                // set the limit for the user read
                if (!isOverflow) {
                    // normal, non-overflow column case.
                    dataIn.setLimit(fieldDataLength);
                    inUserCode = dataIn;
                    sColumn.readExternal(dataIn);
                    inUserCode = null;
                    int unread = dataIn.clearLimit();
                    if (unread != 0)
                        DataInputUtil.skipFully(dataIn, unread);
                } else {
                    // column being fetched is an Object long column.
                    FormatIdInputStream newIn = new FormatIdInputStream(overflowIn);
                    // if a column is a long column, store recommends user
                    // fetch it as a stream.
                    boolean fetchStream = true;
                    if (!(sColumn instanceof StreamStorable)) {
                        fetchStream = false;
                    }
                    if (fetchStream) {
                        ((StreamStorable) sColumn).setStream(newIn);
                    } else {
                        inUserCode = newIn;
                        sColumn.readExternal(newIn);
                        inUserCode = null;
                    }
                }
                continue;
            }
            if (StoredFieldHeader.isNull(fieldStatus)) {
                throw StandardException.newException(SQLState.DATA_NULL_STORABLE_COLUMN, Integer.toString(columnId));
            }
            // This is a non-extensible field, which means the caller must
            // know the correct type and thus the element in row is the
            // correct type or null. It must be Serializable.
            //
            // We do not support Externalizable here.
            dataIn.setLimit(fieldDataLength);
            inUserCode = dataIn;
            row[columnId] = dataIn.readObject();
            inUserCode = null;
            int unread = dataIn.clearLimit();
            if (unread != 0)
                DataInputUtil.skipFully(dataIn, unread);
        }
        // true means all requested columns were present in this row portion.
        return (numberFields + startColumn) > max_colid;
    } catch (IOException ioe) {
        if (inUserCode != null) {
            dataIn.clearLimit();
            if (ioe instanceof EOFException) {
                if (SanityManager.DEBUG) {
                    SanityManager.DEBUG_PRINT("DEBUG_TRACE", "StoredPage - EOF while restoring record: " + recordHeader + "Page dump = " + this);
                }
                // Reading beyond the set limit gives user code
                // an EOFException when it sees the -1 from a read.
                throw StandardException.newException(SQLState.DATA_STORABLE_READ_MISMATCH, ioe, inUserCode.getErrorInfo());
            }
            // some SQLData error reporting
            Exception ne = inUserCode.getNestedException();
            if (ne != null) {
                if (ne instanceof InstantiationException) {
                    throw StandardException.newException(SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION, ne, inUserCode.getErrorInfo());
                }
                if (ne instanceof IllegalAccessException) {
                    throw StandardException.newException(SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION, ne, inUserCode.getErrorInfo());
                }
                if (ne instanceof StandardException) {
                    throw (StandardException) ne;
                }
            }
            throw StandardException.newException(SQLState.DATA_STORABLE_READ_EXCEPTION, ioe, inUserCode.getErrorInfo());
        }
        // re-throw to higher levels so they can put it in correct context.
        throw ioe;
    } catch (ClassNotFoundException cnfe) {
        dataIn.clearLimit();
        // A missing class does not make the database corrupt, just
        // this field inaccessible.
        throw StandardException.newException(SQLState.DATA_STORABLE_READ_MISSING_CLASS, cnfe, inUserCode.getErrorInfo());
    } catch (LinkageError le) {
        // Some error during the link of a user class
        if (inUserCode != null) {
            dataIn.clearLimit();
            throw StandardException.newException(SQLState.DATA_STORABLE_READ_EXCEPTION, le, inUserCode.getErrorInfo());
        }
        throw le;
    }
}
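A recurring discipline in this method is the setLimit/clearLimit bracketing around user deserialization code: the limit makes the current field look like a short stream (user code sees a clean EOF at the field boundary), and any unread remainder is skipped afterwards to realign on the next field. Here is a rough sketch of that mechanism, assuming only the JDK; BoundedInput is a hypothetical stand-in for Derby's limit-tracking input streams.

import java.io.ByteArrayInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

// Illustrative only: a limit-tracking stream in the spirit of
// LimitObjectInput's setLimit/clearLimit usage above.
public class LimitReadSketch {

    static class BoundedInput extends FilterInputStream {
        private int remaining = -1;  // -1 means no limit in force

        BoundedInput(InputStream in) { super(in); }

        void setLimit(int limit) { remaining = limit; }

        // Returns how many limited bytes were left unread by user code.
        int clearLimit() {
            int unread = Math.max(remaining, 0);
            remaining = -1;
            return unread;
        }

        @Override
        public int read() throws IOException {
            if (remaining == 0) return -1;  // user code sees EOF at the boundary
            int b = super.read();
            if (b != -1 && remaining > 0) remaining--;
            return b;
        }

        @Override
        public int read(byte[] b, int off, int len) throws IOException {
            if (remaining == 0) return -1;
            if (remaining > 0) len = Math.min(len, remaining);
            int n = super.read(b, off, len);
            if (n > 0 && remaining > 0) remaining -= n;
            return n;
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] page = { 1, 2, 3, 4, 5, 9, 9 };  // a 5-byte field, then the next field
        BoundedInput in = new BoundedInput(new ByteArrayInputStream(page));
        in.setLimit(5);
        in.read();
        in.read();                   // "user code" reads only 2 of the 5 bytes
        int unread = in.clearLimit();
        in.skip(unread);             // realign the stream on the next field
        System.out.println("skipped " + unread + ", next byte: " + in.read());  // 9
    }
}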