Example usage of org.apache.derby.iapi.sql.execute.ExecRow in the Apache Derby project.
Class MaterializedResultSet, method getNextRowCore:
/**
 * Fetch the next row, taking it either from the underlying source result
 * set or from the materialized temporary table, depending on which phase
 * the result set is currently in.
 *
 * @return the next row, or null when the result set is exhausted
 * @exception StandardException thrown on failure
 */
public ExecRow getNextRowCore() throws StandardException {
    if (isXplainOnlyMode()) {
        return null;
    }
    beginTime = getCurrentTimeMillis();
    if (!isOpen) {
        throw StandardException.newException(SQLState.LANG_RESULT_SET_NOT_OPEN, "next");
    }
    // Rows flow from the source until materialization completes; afterwards
    // they are replayed from the temporary table.
    ExecRow row = fromSource ? getNextRowFromSource() : getNextRowFromTempTable();
    if (row != null) {
        rowsSeen++;
    }
    setCurrentRow(row);
    nextTime += getElapsedMillis(beginTime);
    return row;
}
Example usage of org.apache.derby.iapi.sql.execute.ExecRow in the Apache Derby project.
Class MergeJoinResultSet, method getNextRowCore:
// ////////////////////////////////////////////////////////////////////
//
// ResultSet interface (leftover from NoPutResultSet)
//
// ////////////////////////////////////////////////////////////////////
/**
 * Return the requested values computed
 * from the next row (if any) for which
 * the restriction evaluates to true.
 * <p>
 * Implements the merge-join advance step: both inputs are consumed in
 * order, the right side is advanced while the generated comparator says
 * the left key is greater, and a joined row is returned on an equal-key
 * match that also passes the restriction.  The restriction parameters
 * are evaluated for each row.
 *
 * @exception StandardException Thrown on error
 * @exception StandardException ResultSetNotOpen thrown if closed
 * @return the next row in the join result
 */
public ExecRow getNextRowCore() throws StandardException {
    if (isXplainOnlyMode())
        return null;
    beginTime = getCurrentTimeMillis();
    if (!isOpen)
        throw StandardException.newException(SQLState.LANG_RESULT_SET_NOT_OPEN, "next");
    // Lazily open the right side on the first fetch after (re)open.
    if (!isRightOpen) {
        openRight();
    }
    // Result of the generated left-vs-right key comparison
    // (GREATER_THAN / EQUAL / or neither, i.e. left < right).
    int compareResult;
    /*
    ** For each row in the outer table
    */
    while (leftRow != null) {
        /*
        ** If outer > inner, then go to the
        ** next row in the inner table.
        ** leftGreaterThanRight is a generated method that compares the
        ** current leftRow and rightRow join keys.
        */
        while ((compareResult = ((Integer) leftGreaterThanRight.invoke(activation)).intValue()) == GREATER_THAN) {
            rightRow = rightResultSet.getNextRowCore();
            // NOTE(review): rowsSeenRight is incremented before the null
            // check, so the exhausted (null) fetch is counted too —
            // confirm this matches the statistics convention.
            rowsSeenRight++;
            /*
            ** If there are no more rows in the right
            ** result set, then done.
            */
            if (rightRow == null) {
                clearCurrentRow();
                return (ExecRow) null;
            }
        }
        /*
        ** If they match and the restriction passes,
        ** then return the row.
        */
        if ((compareResult == EQUAL) && restrictionIsTrue()) {
            ExecRow returnRow = getReturnRow(leftRow, rightRow);
            /*
            ** Move the left scan up one for the next
            ** getNextRowCore() call.
            */
            leftRow = leftResultSet.getNextRowCore();
            return returnRow;
        }
        /*
        ** Advance the left row: either the keys matched but the
        ** restriction failed, or left < right.
        */
        leftRow = leftResultSet.getNextRowCore();
        rowsSeenLeft++;
    }
    // Left input exhausted: the join is done.
    clearCurrentRow();
    return (ExecRow) null;
}
Example usage of org.apache.derby.iapi.sql.execute.ExecRow in the Apache Derby project.
Class RowCountResultSet, method getNextRowCore:
/**
 * Return the requested values computed from the next row (if any),
 * honoring the OFFSET and FETCH FIRST/NEXT row-count clauses.
 * <p>
 * On the first call the OFFSET and FETCH FIRST expressions are
 * evaluated and validated; a positive offset is consumed by skipping
 * that many rows from the source before the first row is returned.
 *
 * @exception StandardException thrown on failure.
 * @exception StandardException ResultSetNotOpen thrown if not yet open.
 *
 * @return the next row in the result
 */
public ExecRow getNextRowCore() throws StandardException {
    if (isXplainOnlyMode())
        return null;
    ExecRow result = null;
    beginTime = getCurrentTimeMillis();
    if (virginal) {
        // First invocation: evaluate and validate the OFFSET expression.
        if (offsetMethod != null) {
            DataValueDescriptor offVal = (DataValueDescriptor) offsetMethod.invoke(activation);
            if (!offVal.isNotNull().getBoolean()) {
                throw StandardException.newException(SQLState.LANG_ROW_COUNT_OFFSET_FIRST_IS_NULL, "OFFSET");
            }
            offset = offVal.getLong();
            if (offset < 0) {
                throw StandardException.newException(SQLState.LANG_INVALID_ROW_COUNT_OFFSET, Long.toString(offset));
            }
        } else {
            // No OFFSET clause given.
            offset = 0;
        }
        // Evaluate and validate the FETCH FIRST/NEXT expression, if any.
        if (fetchFirstMethod != null) {
            DataValueDescriptor fetchFirstVal = (DataValueDescriptor) fetchFirstMethod.invoke(activation);
            if (!fetchFirstVal.isNotNull().getBoolean()) {
                throw StandardException.newException(SQLState.LANG_ROW_COUNT_OFFSET_FIRST_IS_NULL, "FETCH FIRST/NEXT");
            }
            fetchFirst = fetchFirstVal.getLong();
            // A JDBC limit clause of 0 (setMaxRows(0)) means "no limit".
            if (hasJDBClimitClause && (fetchFirst == 0)) {
                fetchFirst = Long.MAX_VALUE;
            }
            if (fetchFirst < 1) {
                throw StandardException.newException(SQLState.LANG_INVALID_ROW_COUNT_FIRST, Long.toString(fetchFirst));
            }
        }
        if (offset > 0) {
            // Only skip rows the first time around
            virginal = false;
            long offsetCtr = offset;
            do {
                result = source.getNextRowCore();
                offsetCtr--;
                if (result != null && offsetCtr >= 0) {
                    rowsFiltered++;
                } else {
                    break;
                }
            } while (true);
        } else {
            // NOTE(review): virginal is only cleared on the offset > 0 path,
            // so with offset == 0 the OFFSET/FETCH FIRST expressions are
            // re-evaluated on every call — confirm this is intentional.
            result = candidateRowWithinFetchLimit();
        }
    } else {
        result = candidateRowWithinFetchLimit();
    }
    if (result != null) {
        rowsFetched++;
        rowsSeen++;
    }
    setCurrentRow(result);
    if (runTimeStatsOn) {
        if (!isTopResultSet) {
            // This is simply for RunTimeStats. We first need to get the
            // subquery tracking array via the StatementContext.
            StatementContext sc = activation.getLanguageConnectionContext().getStatementContext();
            subqueryTrackingArray = sc.getSubqueryTrackingArray();
        }
        nextTime += getElapsedMillis(beginTime);
    }
    return result;
}

/**
 * Fetch the next candidate row from the source, or return null once the
 * FETCH FIRST/NEXT limit (if any) has been reached.
 */
private ExecRow candidateRowWithinFetchLimit() throws StandardException {
    if (fetchFirstMethod != null && rowsFetched >= fetchFirst) {
        return null;
    }
    return source.getNextRowCore();
}
Example usage of org.apache.derby.iapi.sql.execute.ExecRow in the Apache Derby project.
Class ScalarAggregateResultSet, method getRowFromResultSet:
// /////////////////////////////////////////////////////////////////////////////
//
// SCAN ABSTRACTION UTILITIES
//
// /////////////////////////////////////////////////////////////////////////////
/**
 * Fetch the next row from the source result set and wrap it in the
 * reusable ExecIndexRow used by the aggregate scan.
 *
 * @param doClone true if the source row should be cloned before wrapping
 * @return the wrapped input row, or null if the source is exhausted
 *
 * @exception StandardException Thrown on error
 */
public ExecIndexRow getRowFromResultSet(boolean doClone) throws StandardException {
    ExecRow next = source.getNextRowCore();
    if (next == null) {
        return null;
    }
    rowsInput++;
    // Clone when requested so later reuse of the source row cannot
    // disturb the aggregate's view of this input row.
    sourceExecIndexRow.execRowToExecIndexRow(doClone ? next.getClone() : next);
    return sourceExecIndexRow;
}
Example usage of org.apache.derby.iapi.sql.execute.ExecRow in the Apache Derby project.
Class ScrollInsensitiveResultSet, method updateRow:
/**
 * @see NoPutResultSet#updateRow
 *
 * Sets the updated column of the hash table to true and updates the row
 * in the hash table with the new values for the row.
 *
 * @param row        the new column values supplied by the update
 * @param rowChanger maps base-table column numbers to positions in
 *                   {@code row} via {@code findSelectedCol}
 * @exception StandardException thrown on failure
 */
public void updateRow(ExecRow row, RowChanger rowChanger) throws StandardException {
    ProjectRestrictResultSet prRS = null;
    if (source instanceof ProjectRestrictResultSet) {
        prRS = (ProjectRestrictResultSet) source;
    } else if (source instanceof RowCountResultSet) {
        // To do any projection in the presence of an intervening
        // RowCountResultSet, we get its child.
        prRS = ((RowCountResultSet) source).getUnderlyingProjectRestrictRS();
    }
    // Position the shared hash-table key on the current row before probing.
    positionInHashTable.setValue(currentPosition);
    DataValueDescriptor[] hashRowArray = getCurrentRowFromHashtable();
    RowLocation rowLoc = (RowLocation) hashRowArray[POS_ROWLOCATION];
    // Maps from each selected column to underlying base table column
    // number, i.e. as from getBaseProjectMapping if a PRN exists, if not
    // we construct one, so we always know where in the hash table a
    // modified column will need to go (we do our own projection).
    int[] map;
    if (prRS != null) {
        map = prRS.getBaseProjectMapping();
    } else {
        // create a natural projection mapping for all columns in SELECT
        // list so we can treat the cases of no PRN and PRN the same.
        // The selected columns follow the bookkeeping columns in the
        // hash-table row, hence the LAST_EXTRA_COLUMN offset.
        int noOfSelectedColumns = hashRowArray.length - (LAST_EXTRA_COLUMN + 1);
        map = new int[noOfSelectedColumns];
        // Identity mapping, since there is no underlying PRN.
        for (int i = 0; i < noOfSelectedColumns; i++) {
            // column is 1-based
            map[i] = i + 1;
        }
    }
    // Construct a new row based on the old one and the updated columns
    ExecRow newRow = new ValueRow(map.length);
    for (int i = 0; i < map.length; i++) {
        // What index in ExecRow "row" corresponds to this position in the
        // hash table, if any?
        int rowColumn = rowChanger.findSelectedCol(map[i]);
        if (rowColumn > 0) {
            // OK, a new value has been supplied, use it
            newRow.setColumn(i + 1, row.getColumn(rowColumn));
        } else {
            // No new value, so continue using old one
            newRow.setColumn(i + 1, hashRowArray[LAST_EXTRA_COLUMN + 1 + i]);
        }
    }
    // Replace the stored row: remove the old entry, then re-add the merged
    // row flagged as updated (order matters — the key is the same).
    ht.remove(new SQLInteger(currentPosition));
    addRowToHashTable(newRow, currentPosition, rowLoc, true);
    // Modify row to refer to data in the BackingStoreHashtable.
    // This allows reading of data which goes over multiple pages
    // when doing the actual update (LOBs). Putting columns of
    // type SQLBinary to disk, has destructive effect on the columns,
    // and they need to be re-read. That is the reason this is needed.
    DataValueDescriptor[] backedData = getRowArrayFromHashTable(currentPosition);
    for (int i = 0; i < map.length; i++) {
        // What index in "row" corresponds to this position in the table,
        // if any?
        int rowColumn = rowChanger.findSelectedCol(map[i]);
        if (rowColumn > 0) {
            // OK, put the value in the hash table back to row.
            row.setColumn(rowColumn, backedData[i]);
        }
    }
}
Aggregations