Example 1 with StreamStorable

Use of org.apache.derby.iapi.services.io.StreamStorable in the Apache Derby project.

From class InsertResultSet, method normalInsertCore.

// Do the work for a "normal" insert
private void normalInsertCore(LanguageConnectionContext lcc, boolean firstExecute) throws StandardException {
    boolean setUserIdentity = constants.hasAutoincrement() && isSingleRowResultSet();
    ExecRow deferredRowBuffer;
    long user_autoinc = 0;
    /* Get or re-use the row changer. */
    if (firstExecute) {
        rowChanger = lcc.getLanguageConnectionFactory().getExecutionFactory().getRowChanger(
                heapConglom,
                constants.heapSCOCI,
                heapDCOCI,
                constants.irgs,
                constants.indexCIDS,
                constants.indexSCOCIs,
                indexDCOCIs,
                0, // number of columns in partial row; meaningless for insert
                tc,
                null, // changed column ids
                constants.getStreamStorableHeapColIds(),
                activation);
        rowChanger.setIndexNames(constants.indexNames);
    }
    /* decode lock mode for the execution isolation level */
    int lockMode = decodeLockMode(constants.lockMode);
    rowChanger.open(lockMode);
    /* The source does not know whether or not we are doing a
     * deferred mode insert.  If we are, then we must clear the
     * index scan info from the activation so that the row changer
     * does not re-use that information (which won't be valid for
     * a deferred mode insert).
     */
    if (constants.deferred) {
        activation.clearIndexScanInfo();
    }
    if (fkInfoArray != null) {
        if (fkChecker == null) {
            fkChecker = new RISetChecker(lcc, tc, fkInfoArray);
        } else {
            fkChecker.reopen();
        }
    }
    if (firstExecute && constants.deferred) {
        Properties properties = new Properties();
        // Get the properties on the old heap
        rowChanger.getHeapConglomerateController().getInternalTablePropertySet(properties);
        /*
        ** If deferred, we save a copy of the entire row.
        */
        rowHolder = new TemporaryRowHolderImpl(activation, properties, resultDescription);
        rowChanger.setRowHolder(rowHolder);
    }
    firstExecuteSpecialHandlingAutoGen(firstExecute, rowChanger, constants.targetUUID);
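    // "row" is an instance field holding the current source row; it is
    // populated with the first row before this method runs.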
    while (row != null) {
        // collect the auto-generated key columns when the caller requested them.
        if (activation.getAutoGeneratedKeysResultsetMode() && autoGeneratedKeysColumnIndexes.length > 0) {
            autoGeneratedKeysRowsHolder.insert(getCompactRow(row, autoGeneratedKeysColumnIndexes));
        }
        // fill in columns that are computed from expressions on other columns
        evaluateGenerationClauses(generationClauses, activation, sourceResultSet, row, false);
        /*
        ** If we're doing a deferred insert, insert into the temporary
        ** conglomerate.  Otherwise, insert directly into the permanent
        ** conglomerates using the rowChanger.
        */
        if (constants.deferred) {
            rowHolder.insert(row);
        } else {
            // Immediate mode violations will throw, so we only ever
            // see false here with deferred constraint mode for one or more
            // of the constraints being checked.
            boolean allOk = evaluateCheckConstraints();
            if (fkChecker != null) {
                fkChecker.doFKCheck(activation, row);
            }
            // Objectify any streaming columns that are indexed.
            if (constants.irgs.length > 0) {
                DataValueDescriptor[] rowArray = row.getRowArray();
                for (int i = 0; i < rowArray.length; i++) {
                    // System.out.println("checking " + i);
                    if (!constants.indexedCols[i]) {
                        continue;
                    }
                    if (rowArray[i] instanceof StreamStorable)
                        rowArray[i].getObject();
                }
            }
            if (allOk) {
                rowChanger.insertRow(row, false);
            } else {
                RowLocation offendingRow = rowChanger.insertRow(row, true);
                deferredChecks = DeferredConstraintsMemory.rememberCheckViolations(lcc, constants.targetUUID, schemaName, tableName, deferredChecks, violatingCheckConstraints, offendingRow, new CheckInfo[1]);
            }
        }
        rowCount++;
        if (setUserIdentity) {
            dd = lcc.getDataDictionary();
            td = dd.getTableDescriptor(constants.targetUUID);
            int maxColumns = td.getMaxColumnID();
            int col;
            for (col = 1; col <= maxColumns; col++) {
                ColumnDescriptor cd = td.getColumnDescriptor(col);
                if (cd.isAutoincrement()) {
                    break;
                }
            }
            if (col <= maxColumns) {
                DataValueDescriptor dvd = row.cloneColumn(col);
                user_autoinc = dvd.getLong();
            }
        }
        // No need to do a next on a single row source
        if (constants.singleRowSource) {
            row = null;
        } else {
            row = getNextRowCore(sourceResultSet);
        }
    }
    /*
    ** If it's a deferred insert, scan the temporary conglomerate and
    ** insert the rows into the permanent conglomerates using rowChanger.
    */
    if (constants.deferred) {
        if (triggerInfo != null) {
            Vector<AutoincrementCounter> v = null;
            if (aiCache != null) {
                v = new Vector<AutoincrementCounter>();
                for (int i = 0; i < aiCache.length; i++) {
                    String s, t, c;
                    if (aiCache[i] == null)
                        continue;
                    Long initialValue = lcc.lastAutoincrementValue((s = constants.getSchemaName()), (t = constants.getTableName()), (c = constants.getColumnName(i)));
                    AutoincrementCounter aic = new AutoincrementCounter(initialValue, constants.getAutoincIncrement(i), aiCache[i].getLong(), s, t, c, i + 1);
                    v.addElement(aic);
                }
            }
            if (triggerActivator == null) {
                triggerActivator = new TriggerEventActivator(lcc, constants.targetUUID, triggerInfo, TriggerExecutionContext.INSERT_EVENT, activation, v);
            } else {
                triggerActivator.reopen();
            }
            // fire BEFORE trigger, do this before checking constraints
            triggerActivator.notifyEvent(TriggerEvents.BEFORE_INSERT, (CursorResultSet) null, rowHolder.getResultSet(), (int[]) null);
        }
        CursorResultSet rs = rowHolder.getResultSet();
        try {
            rs.open();
            while ((deferredRowBuffer = rs.getNextRow()) != null) {
                // we have to set the source row so the check constraint
                // sees the correct row.
                sourceResultSet.setCurrentRow(deferredRowBuffer);
                boolean allOk = evaluateCheckConstraints();
                if (allOk) {
                    rowChanger.insertRow(deferredRowBuffer, false);
                } else {
                    RowLocation offendingRow = rowChanger.insertRow(deferredRowBuffer, true);
                    deferredChecks = DeferredConstraintsMemory.rememberCheckViolations(lcc, constants.targetUUID, schemaName, tableName, deferredChecks, violatingCheckConstraints, offendingRow, new CheckInfo[1]);
                }
            }
        } finally {
            sourceResultSet.clearCurrentRow();
            rs.close();
        }
        if (fkChecker != null) {
            /*
            ** Second scan to make sure all the foreign key
            ** constraints are ok.  We have to do this after
            ** we have completed the inserts in case of self
            ** referencing constraints.
            */
            rs = rowHolder.getResultSet();
            try {
                rs.open();
                while ((deferredRowBuffer = rs.getNextRow()) != null) {
                    fkChecker.doFKCheck(activation, deferredRowBuffer);
                }
            } finally {
                rs.close();
            }
        }
        // fire AFTER trigger
        if (triggerActivator != null) {
            triggerActivator.notifyEvent(TriggerEvents.AFTER_INSERT, (CursorResultSet) null, rowHolder.getResultSet(), (int[]) null);
        }
    }
    if (rowHolder != null) {
        rowHolder.close();
        // rowHolder is kept across opens
    }
    if (fkChecker != null) {
        fkChecker.close();
        fkChecker = null;
    }
    if (setIdentity) {
        lcc.setIdentityValue(identityVal);
    } else if (setUserIdentity) {
        // find the value of the identity column from the user-inserted
        // value and pass it to lcc.setIdentityValue(...)
        lcc.setIdentityValue(user_autoinc);
    }
}
Also used : CursorResultSet(org.apache.derby.iapi.sql.execute.CursorResultSet) ResultColumnDescriptor(org.apache.derby.iapi.sql.ResultColumnDescriptor) ColumnDescriptor(org.apache.derby.iapi.sql.dictionary.ColumnDescriptor) Properties(java.util.Properties) LanguageProperties(org.apache.derby.iapi.sql.LanguageProperties) ExecRow(org.apache.derby.iapi.sql.execute.ExecRow) StreamStorable(org.apache.derby.iapi.services.io.StreamStorable) CheckInfo(org.apache.derby.impl.sql.execute.DeferredConstraintsMemory.CheckInfo) DataValueDescriptor(org.apache.derby.iapi.types.DataValueDescriptor) RowLocation(org.apache.derby.iapi.types.RowLocation)
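
The pattern worth noting in this example is the objectification loop: before a row reaches the row changer, every indexed column still backed by a stream is forced into memory with getObject(), because index maintenance may need to read the value more than once. A minimal standalone sketch of just that step follows; the helper class and method are hypothetical, while StreamStorable, DataValueDescriptor, and getObject() are the names used above.

import org.apache.derby.iapi.services.io.StreamStorable;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.shared.common.error.StandardException;

final class IndexedStreamMaterializer {

    // Objectify every indexed column that is still backed by a stream.
    // indexedCols[i] mirrors constants.indexedCols above: true when
    // column i participates in at least one index.
    static void materializeIndexedStreams(DataValueDescriptor[] rowArray, boolean[] indexedCols) throws StandardException {
        for (int i = 0; i < rowArray.length; i++) {
            if (!indexedCols[i]) {
                // non-indexed columns may remain streams
                continue;
            }
            if (rowArray[i] instanceof StreamStorable) {
                // getObject() reads the stream fully into memory, so a
                // later index-row build can safely re-read the value.
                rowArray[i].getObject();
            }
        }
    }
}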

Example 2 with StreamStorable

Use of org.apache.derby.iapi.services.io.StreamStorable in the Apache Derby project.

From class InsertResultSet, method changedRow.

// TargetResultSet interface
/**
 * @see TargetResultSet#changedRow
 *
 * @exception StandardException thrown if the cursor is finished.
 */
public void changedRow(ExecRow execRow, RowLocation rowLocation) throws StandardException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(bulkInsert, "bulkInsert expected to be true");
    }
    /* Set up sorters, etc. if 1st row and there are indexes */
    if (constants.irgs.length > 0) {
        RowLocation rlClone = (RowLocation) rowLocation.cloneValue(false);
        // Objectify any streaming columns that are indexed.
        for (int i = 0; i < execRow.getRowArray().length; i++) {
            if (!constants.indexedCols[i]) {
                continue;
            }
            if (execRow.getRowArray()[i] instanceof StreamStorable)
                ((DataValueDescriptor) execRow.getRowArray()[i]).getObject();
        }
        // Every index row will share the same row location, etc.
        if (firstRow) {
            firstRow = false;
            indexRows = new ExecIndexRow[constants.irgs.length];
            setUpAllSorts(execRow.getNewNullRow(), rlClone);
        }
        // Put the row into the indexes
        for (int index = 0; index < constants.irgs.length; index++) {
            // Get a new object Array for the index
            indexRows[index].getNewObjectArray();
            // Associate the index row with the source row
            constants.irgs[index].getIndexRow(execRow, rlClone, indexRows[index], (FormatableBitSet) null);
            // Insert the index row into the matching sorter
            sorters[index].insert(indexRows[index].getRowArray());
        }
    }
}
Also used : StreamStorable(org.apache.derby.iapi.services.io.StreamStorable) RowLocation(org.apache.derby.iapi.types.RowLocation)
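
The loop in changedRow fans a single base row out into one index row per index, and every index row shares the same cloned RowLocation. A toy illustration of that fan-out in plain Java follows; it uses no Derby types, and the record shape and index definitions are invented for the demo.

import java.util.ArrayList;
import java.util.List;

final class IndexFanOutDemo {

    // One sorter entry: the index key plus the shared base-row location.
    record IndexEntry(List<Object> key, long rowLocation) {}

    public static void main(String[] args) {
        Object[] baseRow = { 42, "derby", 3.14 };
        int[][] indexedColumns = { { 0 }, { 1, 2 } }; // two indexes
        long rowLocation = 1001L;                     // shared by all entries

        List<IndexEntry> entries = new ArrayList<>();
        for (int[] cols : indexedColumns) {
            // build the index key from the base row's indexed columns
            List<Object> key = new ArrayList<>();
            for (int col : cols) {
                key.add(baseRow[col]);
            }
            // every index entry points back at the same row location
            entries.add(new IndexEntry(key, rowLocation));
        }
        System.out.println(entries);
    }
}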

Example 3 with StreamStorable

Use of org.apache.derby.iapi.services.io.StreamStorable in the Apache Derby project.

From class StoredPage, method logColumn.

/**
 * Log column from input row to the given output stream.
 * <p>
 * Read data from row[arrayPosition], and write the column data in
 * raw store page format to the given output stream.  Along the way,
 * determine whether the column will fit on the current page.
 * <p>
 * Action taken in this routine is determined by the kind of column as
 * specified in the columnFlag:
 *     COLUMN_NONE   - the column is insignificant
 *     COLUMN_FIRST  - this is the first column in a logRow() call
 *     COLUMN_LONG   - this is a known long column, therefore we will
 *                     store part of the column on the current page and
 *                     overflow the rest if necessary.
 * <p>
 * Upon entry to this routine logicalDataOut is tied to the
 * DynamicByteArrayOutputStream out.
 * <BR>
 * If a column is a long column and it does not totally fit on the current
 * page, then a LongColumnException is thrown.  We package up info about
 * the current long column in the partially filled in exception so that
 * callers can take correct action.  The column will now be set as a
 * stream.
 *
 * @return The spaceAvailable after accounting for space for this column.
 *
 * @param row           array of columns from which to read the column.
 * @param arrayPosition The array position of the column to read from row.
 * @param out           The stream to write the raw store page format of
 *                      the column to.
 * @param spaceAvailable    The number of bytes available on the page for
 *                          this column, this may differ from current page
 *                          as it may include bytes used by previous
 *                          columns.
 * @param columnFlag    one of: COLUMN_NONE, COLUMN_FIRST, or COLUMN_LONG.
 *
 * @exception  StandardException    Standard exception policy.
 * @exception  LongColumnException  Thrown if column will not fit on a
 *                                  single page. See notes above
 */
private int logColumn(Object[] row, int arrayPosition, DynamicByteArrayOutputStream out, int spaceAvailable, int columnFlag, int overflowThreshold) throws StandardException, IOException {
    // RESOLVE (mikem) - why will row be null?
    Object column = (row != null ? row[arrayPosition] : null);
    // if the column is already a RawField, its header is already formatted.
    if (column instanceof RawField) {
        // field data is raw, no need to set up a field header etc.
        byte[] data = ((RawField) column).getData();
        if (data.length <= spaceAvailable) {
            out.write(data);
            spaceAvailable -= data.length;
        }
        return spaceAvailable;
    }
    // If this is a long column, it may fit in this page or it may not.
    boolean longColumnDone = true;
    // default field status.
    int fieldStatus = StoredFieldHeader.setFixed(StoredFieldHeader.setInitial(), true);
    int beginPosition = out.getPosition();
    int columnBeginPosition = 0;
    int headerLength;
    int fieldDataLength = 0;
    if (column instanceof StreamStorable) {
        StreamStorable stream_storable_column = (StreamStorable) column;
        if (stream_storable_column.returnStream() != null) {
            column = (Object) stream_storable_column.returnStream();
        }
    }
    if ((column == null) && (columnFlag != COLUMN_CREATE_NULL)) {
        fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
        headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
    } else if (column instanceof InputStream) {
        RememberBytesInputStream bufferedIn = null;
        int bufferLen = 0;
        int estimatedMaxDataSize = getMaxDataLength(spaceAvailable, overflowThreshold);
        // check whether the data is already buffered in a RememberBytesInputStream.
        if (column instanceof RememberBytesInputStream) {
            // data is already RememberBytesInputStream
            bufferedIn = (RememberBytesInputStream) column;
            bufferLen = bufferedIn.numBytesSaved();
        } else {
            // data comes in as an inputstream
            bufferedIn = new RememberBytesInputStream((InputStream) column, new MemByteHolder(maxFieldSize + 1));
            // hand the buffered stream back to the column so bytes
            // already read are remembered in the RememberBytesInputStream.
            if (row[arrayPosition] instanceof StreamStorable)
                ((StreamStorable) row[arrayPosition]).setStream(bufferedIn);
            // set column to the RememberBytesInputStream so that
            // all future access to this column will be able to get
            // at bytes that have been already read. This assignment
            // is needed to ensure that if long column exception is
            // thrown, the column is set correctly
            column = bufferedIn;
        }
        // read the buffer by reading the max we can read.
        if (bufferLen < (estimatedMaxDataSize + 1)) {
            bufferLen += bufferedIn.fillBuf(estimatedMaxDataSize + 1 - bufferLen);
        }
        if ((bufferLen <= estimatedMaxDataSize)) {
            // we will be able to fit this into the page
            fieldDataLength = bufferLen;
            fieldStatus = StoredFieldHeader.setFixed(fieldStatus, true);
            headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
            // if the field is extensible, then we write the serializable
            // formatId.  if the field is non-extensible, we don't need to
            // write the formatId.  but at this point, how do we know
            // whether the field is extensible or not???  For Plato release,
            // we do not support InputStream on extensible types,
            // therefore, we ignore the formatId for now.
            bufferedIn.putBuf(logicalDataOut, fieldDataLength);
        } else {
            if (columnFlag == COLUMN_LONG) {
                // column is a long column and the remaining portion does
                // not fit on the current page.
                longColumnDone = false;
                // it's a portion of a long column, and there is more to
                // write reserve enough room for overflow pointer, then
                // write as much data as we can leaving an extra 2 bytes
                // for overflow field header.
                fieldDataLength = estimatedMaxDataSize - OVERFLOW_POINTER_SIZE - 2;
                fieldStatus = StoredFieldHeader.setFixed(fieldStatus, true);
                headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
                bufferedIn.putBuf(logicalDataOut, fieldDataLength);
                // now, we need to adjust the buffer, move the unread
                // bytes to the beginning position the cursor correctly,
                // so, next time around, we can read more into the buffer.
                int remainingBytes = bufferedIn.available();
                // move the unread bytes to the beginning of the byteHolder.
                int bytesShifted = bufferedIn.shiftToFront();
            } else {
                // column not a long column and does not fit on page.
                int delta = maxFieldSize - bufferLen + 1;
                if (delta > 0)
                    bufferLen += bufferedIn.fillBuf(delta);
                fieldDataLength = bufferLen;
                // the data will not fit on this page make sure the new
                // input stream is passed back to the upper layer...
                column = (Object) bufferedIn;
            }
        }
    } else if (columnFlag == COLUMN_CREATE_NULL) {
        // 
        // This block handles the case when a couple columns have been added
        // recently and now one of the later columns is being updated. Newly added columns
        // which appear in the row before the updated column don't actually have
        // any values yet. We stuff NULLs into those newly added columns here.
        // This fixes DERBY-5679.
        // 
        fieldStatus = StoredFieldHeader.setNull(fieldStatus, true);
        // header is written with 0 length here.
        headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
    } else if (column instanceof DataValueDescriptor) {
        DataValueDescriptor sColumn = (DataValueDescriptor) column;
        boolean isNull = (columnFlag == COLUMN_CREATE_NULL) || sColumn.isNull();
        if (isNull) {
            fieldStatus = StoredFieldHeader.setNull(fieldStatus, true);
        }
        // header is written with 0 length here.
        headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
        if (!isNull) {
            // write the field data to the log
            try {
                columnBeginPosition = out.getPosition();
                sColumn.writeExternal(logicalDataOut);
            } catch (IOException ioe) {
                // SQLData error reporting
                if (logicalDataOut != null) {
                    Exception ne = logicalDataOut.getNestedException();
                    if (ne != null) {
                        if (ne instanceof StandardException) {
                            throw (StandardException) ne;
                        }
                    }
                }
                throw StandardException.newException(SQLState.DATA_STORABLE_WRITE_EXCEPTION, ioe);
            }
            fieldDataLength = (out.getPosition() - beginPosition) - headerLength;
        }
    } else if (column instanceof RecordHandle) {
        // we are inserting an overflow pointer for a long column
        // casted reference to column to avoid repeated casting.
        RecordHandle overflowHandle = (RecordHandle) column;
        fieldStatus = StoredFieldHeader.setOverflow(fieldStatus, true);
        headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
        fieldDataLength += CompressedNumber.writeLong(out, overflowHandle.getPageNumber());
        fieldDataLength += CompressedNumber.writeInt(out, overflowHandle.getId());
    } else {
        // Serializable/Externalizable/Formattable
        // all look the same at this point.
        // header is written with 0 length here.
        headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
        logicalDataOut.writeObject(column);
        fieldDataLength = (out.getPosition() - beginPosition) - headerLength;
    }
    // calculate the size of the field on the page with a compressed field header
    fieldStatus = StoredFieldHeader.setFixed(fieldStatus, false);
    int fieldSizeOnPage = StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize) + fieldDataLength;
    userRowSize += fieldDataLength;
    boolean fieldIsLong = isLong(fieldSizeOnPage, overflowThreshold);
    // Do we have enough space on the page for this field?
    if (((spaceAvailable < fieldSizeOnPage) || (fieldIsLong)) && (columnFlag != COLUMN_LONG)) {
        if (fieldIsLong) {
            if (!(column instanceof InputStream)) {
                // Convert already written object to an InputStream.
                ByteArray fieldData = new ByteArray(((DynamicByteArrayOutputStream) out).getByteArray(), (columnBeginPosition), fieldDataLength);
                ByteArrayInputStream columnIn = new ByteArrayInputStream(fieldData.getArray(), columnBeginPosition, fieldDataLength);
                MemByteHolder byteHolder = new MemByteHolder(fieldDataLength + 1);
                RememberBytesInputStream bufferedIn = new RememberBytesInputStream(columnIn, byteHolder);
                // the data will not fit on this page make sure the new
                // input stream is passed back to the upper layer...
                column = bufferedIn;
            }
            out.setPosition(beginPosition);
            // This exception carries the information for the client
            // routine to continue inserting the long row on multiple
            // pages.
            LongColumnException lce = new LongColumnException();
            lce.setColumn(column);
            throw lce;
        } else {
            // Column does not fit on this page, but it isn't a long column.
            out.setPosition(beginPosition);
            return (spaceAvailable);
        }
    }
    // Now we go back to update the fieldDataLength in the field header
    out.setPosition(beginPosition);
    // slotFieldSize is set based on the pageSize.
    // We are borrowing this to set the size of our fieldDataLength.
    fieldStatus = StoredFieldHeader.setFixed(fieldStatus, true);
    headerLength = StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);
    // set position to the end of the field
    out.setPosition(beginPosition + fieldDataLength + headerLength);
    spaceAvailable -= fieldSizeOnPage;
    // YYZ: revisit
    if (columnFlag == COLUMN_LONG) {
        // return -1 when the long column is complete so that
        // BasePage.insertLongColumn can end its loop; 1 means more
        // of the column remains to be written.
        if (longColumnDone)
            return -1;
        else
            return 1;
    } else {
        return (spaceAvailable);
    }
}
Also used : FormatIdInputStream(org.apache.derby.iapi.services.io.FormatIdInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) ArrayInputStream(org.apache.derby.iapi.services.io.ArrayInputStream) InputStream(java.io.InputStream) RecordHandle(org.apache.derby.iapi.store.raw.RecordHandle) IOException(java.io.IOException) StandardException(org.apache.derby.shared.common.error.StandardException) IOException(java.io.IOException) EOFException(java.io.EOFException) StandardException(org.apache.derby.shared.common.error.StandardException) ByteArrayInputStream(java.io.ByteArrayInputStream) StreamStorable(org.apache.derby.iapi.services.io.StreamStorable) ByteArray(org.apache.derby.iapi.util.ByteArray) DataValueDescriptor(org.apache.derby.iapi.types.DataValueDescriptor)
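
The first StreamStorable touch point in logColumn is the unwrap step: a column that still holds its original stream is swapped for that stream, so the raw bytes can be logged without materializing the whole value first. A minimal sketch of just that step follows; the helper name is hypothetical, and returnStream() is the StreamStorable call used above.

import java.io.InputStream;

import org.apache.derby.iapi.services.io.StreamStorable;

final class ColumnStreamUnwrap {

    // If the column is still backed by its original stream, log from
    // the stream; otherwise fall through to normal serialization.
    static Object unwrapIfStreaming(Object column) {
        if (column instanceof StreamStorable) {
            InputStream in = ((StreamStorable) column).returnStream();
            if (in != null) {
                // raw bytes will be written straight from the stream
                return in;
            }
        }
        // already materialized (or not stream-capable): serialize normally
        return column;
    }
}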

Example 4 with StreamStorable

Use of org.apache.derby.iapi.services.io.StreamStorable in the Apache Derby project.

From class StoredPage, method readOneColumnFromPage.

/**
 * Read just one column from stream into row.
 * <p>
 * The routine reads just one column from the row, it is mostly code
 * taken from readRecordFromStream, but highly optimized to just get
 * one column from a non-overflow row.  It can only be called to read
 * a row from the pageData array as it directly accesses the page array
 * to avoid the Stream overhead while processing non-user data which
 * does not need the limit functionality.
 * <p>
 * It is expected that this code will be called to read in a column
 * associated with qualifiers that are applied one column at a time,
 * and has been specialized to provide the greatest performance for
 * processing qualifiers.  This kind of access is done when scanning
 * large datasets while applying qualifiers and thus any performance
 * gain at this low level is multiplied by the large number of rows that
 * may be iterated over.
 * <p>
 * The column is read into the object located in row[qual_colid].
 *
 * @param row                   col is read into object in row[qual_colid].
 * @param offset_to_field_data  offset in bytes from top of page to field
 * @param colid                 the column id to read, colid N is row[N]
 * @param recordHeader          record header of row to read column from.
 * @param recordToLock          record handle to lock,
 *                              used by overflow column code.
 *
 * @exception  StandardException  Standard exception policy.
 */
private final void readOneColumnFromPage(Object[] row, int colid, int offset_to_field_data, StoredRecordHeader recordHeader, RecordHandle recordToLock) throws StandardException, IOException {
    ErrorObjectInput inUserCode = null;
    // Reads in this routine are always against the raw data in the
    // pageData array, thus it can assume array access to page data array.
    ArrayInputStream lrdi = rawDataIn;
    try {
        if (SanityManager.DEBUG) {
            if (colid >= row.length)
                SanityManager.THROWASSERT("colid = " + colid + ";row length = " + row.length);
            // currently this routine will not work on long rows.
            if (recordHeader.getFirstField() != 0) {
                SanityManager.THROWASSERT("recordHeader.getFirstField() = " + recordHeader.getFirstField());
            }
        }
        Object column = row[colid];
        // if the column id exists on this page.
        if (colid <= (recordHeader.getNumberFields() - 1)) {
            for (int columnId = colid; columnId > 0; columnId--) {
                offset_to_field_data += StoredFieldHeader.readTotalFieldLength(pageData, offset_to_field_data);
            }
            // read the field header
            // read the status byte.
            int fieldStatus = StoredFieldHeader.readStatus(pageData, offset_to_field_data);
            // read the field data length, and position on 1st byte of data.
            int fieldDataLength = StoredFieldHeader.readFieldLengthAndSetStreamPosition(pageData, offset_to_field_data + StoredFieldHeader.STORED_FIELD_HEADER_STATUS_SIZE, fieldStatus, slotFieldSize, lrdi);
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(!StoredFieldHeader.isExtensible(fieldStatus), "extensible fields not supported yet");
            }
            if (!StoredFieldHeader.isNonexistent(fieldStatus)) {
                boolean isOverflow = StoredFieldHeader.isOverflow(fieldStatus);
                OverflowInputStream overflowIn = null;
                if (isOverflow) {
                    // A fetched long column is returned as a stream
                    long overflowPage = CompressedNumber.readLong((InputStream) lrdi);
                    int overflowId = CompressedNumber.readInt((InputStream) lrdi);
                    // Prepare the stream for results...
                    // create the byteHolder the size of a page, so that
                    // it can hold as much field data as fits on a page.
                    MemByteHolder byteHolder = new MemByteHolder(pageData.length);
                    overflowIn = new OverflowInputStream(byteHolder, owner, overflowPage, overflowId, recordToLock);
                }
                // Deal with Storable columns
                if (column instanceof DataValueDescriptor) {
                    DataValueDescriptor sColumn = (DataValueDescriptor) column;
                    // is the column null ?
                    if (StoredFieldHeader.isNull(fieldStatus)) {
                        sColumn.restoreToNull();
                    } else {
                        // set the limit for the user read
                        if (!isOverflow) {
                            // normal, non-overflow column case.
                            lrdi.setLimit(fieldDataLength);
                            inUserCode = lrdi;
                            sColumn.readExternalFromArray(lrdi);
                            inUserCode = null;
                            int unread = lrdi.clearLimit();
                            if (unread != 0)
                                DataInputUtil.skipFully(lrdi, unread);
                        } else {
                            // fetched column is a Storable long column.
                            FormatIdInputStream newIn = new FormatIdInputStream(overflowIn);
                            if ((sColumn instanceof StreamStorable)) {
                                ((StreamStorable) sColumn).setStream(newIn);
                            } else {
                                inUserCode = newIn;
                                sColumn.readExternal(newIn);
                                inUserCode = null;
                            }
                        }
                    }
                } else {
                    if (StoredFieldHeader.isNull(fieldStatus)) {
                        throw StandardException.newException(SQLState.DATA_NULL_STORABLE_COLUMN, Integer.toString(colid));
                    }
                    // This is a non-extensible field, which means the
                    // caller must know the correct type and thus the
                    // element in row is the correct type or null. It must
                    // be Serializable.
                    // 
                    // We do not support Externalizable here.
                    lrdi.setLimit(fieldDataLength);
                    inUserCode = lrdi;
                    // RESOLVE (no non-storables?)
                    row[colid] = (Object) lrdi.readObject();
                    inUserCode = null;
                    int unread = lrdi.clearLimit();
                    if (unread != 0)
                        DataInputUtil.skipFully(lrdi, unread);
                }
            } else {
                if (column instanceof DataValueDescriptor) {
                    // RESOLVE - This is in place for 1.2. In the future
                    // we may want to return this column as non-existent
                    // even if it is a storable column, or maybe use a
                    // supplied default.
                    ((DataValueDescriptor) column).restoreToNull();
                } else {
                    row[colid] = null;
                }
            }
        } else {
            if (column instanceof DataValueDescriptor) {
                // RESOLVE - This is in place for 1.2. In the future
                // we may want to return this column as non-existent
                // even if it is a storable column, or maybe use a
                // supplied default.
                ((DataValueDescriptor) column).restoreToNull();
            } else {
                row[colid] = null;
            }
        }
    } catch (IOException ioe) {
        if (inUserCode != null) {
            lrdi.clearLimit();
            if (ioe instanceof EOFException) {
                if (SanityManager.DEBUG) {
                    SanityManager.DEBUG_PRINT("DEBUG_TRACE", "StoredPage.readOneColumnFromPage - EOF while restoring record: " + recordHeader + "Page dump = " + this);
                    SanityManager.showTrace(ioe);
                }
                // an EOFException when it sees the -1 from a read
                throw StandardException.newException(SQLState.DATA_STORABLE_READ_MISMATCH, ioe, inUserCode.getErrorInfo());
            }
            // some SQLData error reporting
            Exception ne = inUserCode.getNestedException();
            if (ne != null) {
                if (ne instanceof InstantiationException) {
                    throw StandardException.newException(SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION, ne, inUserCode.getErrorInfo());
                }
                if (ne instanceof IllegalAccessException) {
                    throw StandardException.newException(SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION, ne, inUserCode.getErrorInfo());
                }
                if (ne instanceof StandardException) {
                    throw (StandardException) ne;
                }
            }
            throw StandardException.newException(SQLState.DATA_STORABLE_READ_EXCEPTION, ioe, inUserCode.getErrorInfo());
        }
        // re-throw to higher levels so they can put it in correct context.
        throw ioe;
    } catch (ClassNotFoundException cnfe) {
        lrdi.clearLimit();
        // this doesn't make the database corrupt, just that this field is inaccessible
        throw StandardException.newException(SQLState.DATA_STORABLE_READ_MISSING_CLASS, cnfe, inUserCode.getErrorInfo());
    } catch (LinkageError le) {
        // Some error during the link of a user class
        if (inUserCode != null) {
            lrdi.clearLimit();
            throw StandardException.newException(SQLState.DATA_STORABLE_READ_EXCEPTION, le, inUserCode.getErrorInfo());
        }
        throw le;
    }
}
Also used : ErrorObjectInput(org.apache.derby.iapi.services.io.ErrorObjectInput) IOException(java.io.IOException) StandardException(org.apache.derby.shared.common.error.StandardException) IOException(java.io.IOException) EOFException(java.io.EOFException) FormatIdInputStream(org.apache.derby.iapi.services.io.FormatIdInputStream) StandardException(org.apache.derby.shared.common.error.StandardException) StreamStorable(org.apache.derby.iapi.services.io.StreamStorable) EOFException(java.io.EOFException) ByteArrayInputStream(java.io.ByteArrayInputStream) ArrayInputStream(org.apache.derby.iapi.services.io.ArrayInputStream) DataValueDescriptor(org.apache.derby.iapi.types.DataValueDescriptor)
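
The overflow branch above captures the key decision for long columns: a StreamStorable column is handed the overflow stream via setStream and read lazily on first use, while any other column type must be read eagerly with readExternal. A minimal sketch of that decision follows, assuming the caller has already positioned a FormatIdInputStream over the overflow chain; the helper and its signature are hypothetical.

import java.io.IOException;

import org.apache.derby.iapi.services.io.FormatIdInputStream;
import org.apache.derby.iapi.services.io.StreamStorable;
import org.apache.derby.iapi.types.DataValueDescriptor;

final class LongColumnDelivery {

    static void deliverColumn(DataValueDescriptor column, FormatIdInputStream overflowIn)
            throws IOException, ClassNotFoundException {
        if (column instanceof StreamStorable) {
            // Lazy: the column keeps the stream, and bytes are pulled
            // only when the value is actually used.
            ((StreamStorable) column).setStream(overflowIn);
        } else {
            // Eager: this column type cannot hold a stream, so the
            // whole value is read now.
            column.readExternal(overflowIn);
        }
    }
}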

Example 5 with StreamStorable

Use of org.apache.derby.iapi.services.io.StreamStorable in the Apache Derby project.

From class StoredPage, method readRecordFromArray.

private final boolean readRecordFromArray(Object[] row, int max_colid, int[] vCols, int[] mCols, ArrayInputStream dataIn, StoredRecordHeader recordHeader, RecordHandle recordToLock) throws StandardException, IOException {
    ErrorObjectInput inUserCode = null;
    try {
        // Get the number of columns in the row.
        int numberFields = recordHeader.getNumberFields();
        int startColumn = recordHeader.getFirstField();
        if (startColumn > max_colid) {
            // done if the startColumn is higher than highest column.
            return true;
        }
        // For each column in the row, restore the column from
        // the corresponding field in the record.  If the field
        // is missing or not set, set the column to null.
        int highestColumnOnPage = numberFields + startColumn;
        int vColsSize = (vCols == null) ? 0 : vCols.length;
        int offset_to_field_data = dataIn.getPosition();
        for (int columnId = startColumn; columnId <= max_colid; columnId++) {
            // skip columns that are not requested or have already been read.
            if (((vCols != null) && (!(vColsSize > columnId && (vCols[columnId] != 0)))) || ((mCols != null) && (mCols[columnId] != 0))) {
                if (columnId < highestColumnOnPage) {
                    // If the field exists in the row on the page, but the
                    // partial row being returned does not include it,
                    // skip the field ...
                    offset_to_field_data += StoredFieldHeader.readTotalFieldLength(pageData, offset_to_field_data);
                }
                continue;
            } else if (columnId < highestColumnOnPage) {
                // the column is on this page.
                // read the field header
                // read the status byte.
                int fieldStatus = StoredFieldHeader.readStatus(pageData, offset_to_field_data);
                // read the field data length, position on 1st byte of data
                int fieldDataLength = StoredFieldHeader.readFieldLengthAndSetStreamPosition(pageData, offset_to_field_data + StoredFieldHeader.STORED_FIELD_HEADER_STATUS_SIZE, fieldStatus, slotFieldSize, dataIn);
                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(!StoredFieldHeader.isExtensible(fieldStatus), "extensible fields not supported yet");
                }
                Object column = row[columnId];
                OverflowInputStream overflowIn = null;
                if ((fieldStatus & StoredFieldHeader.FIELD_NONEXISTENT) != StoredFieldHeader.FIELD_NONEXISTENT) {
                    // normal path - field exists.
                    boolean isOverflow = ((fieldStatus & StoredFieldHeader.FIELD_OVERFLOW) != 0);
                    if (isOverflow) {
                        // A fetched long column is returned as a stream
                        long overflowPage = CompressedNumber.readLong((InputStream) dataIn);
                        int overflowId = CompressedNumber.readInt((InputStream) dataIn);
                        // Prepare the stream for results...
                        // create the byteHolder the size of a page, so
                        // that it can hold as much field data as fits
                        // on a page.
                        MemByteHolder byteHolder = new MemByteHolder(pageData.length);
                        overflowIn = new OverflowInputStream(byteHolder, owner, overflowPage, overflowId, recordToLock);
                    }
                    // Deal with Object columns
                    if (column instanceof DataValueDescriptor) {
                        DataValueDescriptor sColumn = (DataValueDescriptor) column;
                        // is the column null ?
                        if ((fieldStatus & StoredFieldHeader.FIELD_NULL) == 0) {
                            // set the limit for the user read
                            if (!isOverflow) {
                                // normal, non-overflow column case.
                                dataIn.setLimit(fieldDataLength);
                                inUserCode = dataIn;
                                sColumn.readExternalFromArray(dataIn);
                                inUserCode = null;
                                int unread = dataIn.clearLimit();
                                if (unread != 0)
                                    DataInputUtil.skipFully(dataIn, unread);
                            } else {
                                // column being fetched is a long column.
                                FormatIdInputStream newIn = new FormatIdInputStream(overflowIn);
                                // long columns are fetched as a stream.
                                boolean fetchStream = (sColumn instanceof StreamStorable);
                                if (fetchStream) {
                                    ((StreamStorable) sColumn).setStream(newIn);
                                } else {
                                    inUserCode = newIn;
                                    sColumn.readExternal(newIn);
                                    inUserCode = null;
                                }
                            }
                        } else {
                            sColumn.restoreToNull();
                        }
                    } else {
                        if (StoredFieldHeader.isNull(fieldStatus)) {
                            throw StandardException.newException(SQLState.DATA_NULL_STORABLE_COLUMN, Integer.toString(columnId));
                        }
                        // This is a non-extensible field, which means the
                        // caller must know the correct type and thus the
                        // element in row is the correct type or null. It
                        // must be Serializable.
                        // 
                        // We do not support Externalizable here.
                        dataIn.setLimit(fieldDataLength);
                        inUserCode = dataIn;
                        // RESOLVE (no non-storables?)
                        row[columnId] = (Object) dataIn.readObject();
                        inUserCode = null;
                        int unread = dataIn.clearLimit();
                        if (unread != 0)
                            DataInputUtil.skipFully(dataIn, unread);
                    }
                } else {
                    if (column instanceof DataValueDescriptor) {
                        // RESOLVE - This is in place for 1.2. In the future
                        // we may want to return this column as non-existent
                        // even if it is a storable column, or maybe use a
                        // supplied default.
                        ((DataValueDescriptor) column).restoreToNull();
                    } else {
                        row[columnId] = null;
                    }
                }
                // move the counter to point to beginning of next field.
                offset_to_field_data = dataIn.getPosition();
            } else {
                // field is non-existent
                Object column = row[columnId];
                if (column instanceof DataValueDescriptor) {
                    // RESOLVE - This is in place for 1.2. In the future
                    // we may want to return this column as non-existent
                    // even if it is a storable column, or maybe use a
                    // supplied default.
                    ((DataValueDescriptor) column).restoreToNull();
                } else {
                    row[columnId] = null;
                }
            }
        }
        return (numberFields + startColumn) > max_colid;
    } catch (IOException ioe) {
        if (inUserCode != null) {
            dataIn.clearLimit();
            if (ioe instanceof EOFException) {
                if (SanityManager.DEBUG) {
                    SanityManager.DEBUG_PRINT("DEBUG_TRACE", "StoredPage - EOF while restoring record: " + recordHeader + "Page dump = " + this);
                }
                // an EOFException when it sees the -1 from a read
                throw StandardException.newException(SQLState.DATA_STORABLE_READ_MISMATCH, ioe, inUserCode.getErrorInfo());
            }
            // some SQLData error reporting
            Exception ne = inUserCode.getNestedException();
            if (ne != null) {
                if (ne instanceof InstantiationException) {
                    throw StandardException.newException(SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION, ne, inUserCode.getErrorInfo());
                }
                if (ne instanceof IllegalAccessException) {
                    throw StandardException.newException(SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION, ne, inUserCode.getErrorInfo());
                }
                if (ne instanceof StandardException) {
                    throw (StandardException) ne;
                }
            }
            throw StandardException.newException(SQLState.DATA_STORABLE_READ_EXCEPTION, ioe, inUserCode.getErrorInfo());
        }
        // re-throw to higher levels so they can put it in correct context.
        throw ioe;
    } catch (ClassNotFoundException cnfe) {
        dataIn.clearLimit();
        // this doesn't make the database corrupt, just that this field is inaccessible
        throw StandardException.newException(SQLState.DATA_STORABLE_READ_MISSING_CLASS, cnfe, inUserCode.getErrorInfo());
    } catch (LinkageError le) {
        // Some error during the link of a user class
        if (inUserCode != null) {
            dataIn.clearLimit();
            throw StandardException.newException(SQLState.DATA_STORABLE_READ_EXCEPTION, le, inUserCode.getErrorInfo());
        }
        throw le;
    }
}
Also used : ErrorObjectInput(org.apache.derby.iapi.services.io.ErrorObjectInput) FormatIdInputStream(org.apache.derby.iapi.services.io.FormatIdInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) ArrayInputStream(org.apache.derby.iapi.services.io.ArrayInputStream) InputStream(java.io.InputStream) IOException(java.io.IOException) StandardException(org.apache.derby.shared.common.error.StandardException) IOException(java.io.IOException) EOFException(java.io.EOFException) FormatIdInputStream(org.apache.derby.iapi.services.io.FormatIdInputStream) StandardException(org.apache.derby.shared.common.error.StandardException) StreamStorable(org.apache.derby.iapi.services.io.StreamStorable) EOFException(java.io.EOFException) DataValueDescriptor(org.apache.derby.iapi.types.DataValueDescriptor)
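
readRecordFromArray deserializes only the columns the caller asked for; every other field is skipped by advancing the offset over its on-page bytes via readTotalFieldLength. A toy illustration of that skip-versus-read pattern follows in plain Java; the one-byte length-prefixed field layout is invented for the demo and is not Derby's page format.

final class PartialRowScanDemo {

    public static void main(String[] args) {
        // three fields, each stored as [1-byte length][payload bytes]
        byte[] page = { 2, 'h', 'i', 3, 'f', 'o', 'o', 1, '!' };
        boolean[] wanted = { false, true, false }; // fetch column 1 only

        int offset = 0;
        for (int col = 0; col < wanted.length; col++) {
            int len = page[offset];
            if (wanted[col]) {
                // only requested columns pay the deserialization cost
                String value = new String(page, offset + 1, len);
                System.out.println("col " + col + " = " + value);
            }
            // skip header + payload either way, mirroring
            // readTotalFieldLength advancing offset_to_field_data
            offset += 1 + len;
        }
    }
}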

Aggregations

StreamStorable (org.apache.derby.iapi.services.io.StreamStorable): 10
DataValueDescriptor (org.apache.derby.iapi.types.DataValueDescriptor): 8
FormatIdInputStream (org.apache.derby.iapi.services.io.FormatIdInputStream): 5
EOFException (java.io.EOFException): 4
IOException (java.io.IOException): 4
InputStream (java.io.InputStream): 4
StandardException (org.apache.derby.shared.common.error.StandardException): 4
ByteArrayInputStream (java.io.ByteArrayInputStream): 3
ArrayInputStream (org.apache.derby.iapi.services.io.ArrayInputStream): 3
ErrorObjectInput (org.apache.derby.iapi.services.io.ErrorObjectInput): 3
RowLocation (org.apache.derby.iapi.types.RowLocation): 3
BufferedInputStream (java.io.BufferedInputStream): 1
Properties (java.util.Properties): 1
FormatableBitSet (org.apache.derby.iapi.services.io.FormatableBitSet): 1
LimitInputStream (org.apache.derby.iapi.services.io.LimitInputStream): 1
Storable (org.apache.derby.iapi.services.io.Storable): 1
LanguageProperties (org.apache.derby.iapi.sql.LanguageProperties): 1
ResultColumnDescriptor (org.apache.derby.iapi.sql.ResultColumnDescriptor): 1
ColumnDescriptor (org.apache.derby.iapi.sql.dictionary.ColumnDescriptor): 1
CursorResultSet (org.apache.derby.iapi.sql.execute.CursorResultSet): 1