use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.
the class ProjectRestrictResultSet method doProjection.
/**
* Do the projection against the source row. Use reflection
* where necessary, otherwise get the source column into our
* result row.
*
* @param sourceRow The source row.
*
* @return The result of the projection
*
* @exception StandardException thrown on failure.
*/
private ExecRow doProjection(ExecRow sourceRow) throws StandardException {
    // No need to use reflection if reusing the result
    if (reuseResult && projRow != null) {
        /* Make sure we reset the current row based on the re-used
         * result. Otherwise, if the "current row" for this result
         * set was nulled out in a previous call to getNextRow(),
         * which can happen if this node is the right-side of
         * a left outer join, the "current row" stored for this
         * result set in activation.row would remain null, which
         * would be wrong. DERBY-3538.
         */
        setCurrentRow(projRow);
        return projRow;
    }
    ExecRow result;
    // Use reflection to do as much of projection as required
    if (projection != null) {
        result = (ExecRow) projection.invoke(activation);
    } else {
        result = mappedResultRow;
    }
    // Copy any mapped columns from the source
    for (int index = 0; index < projectMapping.length; index++) {
        if (projectMapping[index] != -1) {
            DataValueDescriptor dvd = sourceRow.getColumn(projectMapping[index]);
            // If the value isn't a stream, don't bother cloning it.
            if (cloneMap[index] && dvd.hasStream()) {
                dvd = dvd.cloneValue(false);
            }
            result.setColumn(index + 1, dvd);
        }
    }
    /* We need to reset the current row after doing the projection */
    setCurrentRow(result);
    /* Remember the result if reusing it */
    if (reuseResult) {
        projRow = result;
    }
    return result;
}
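To experiment with the column-copy half of this method outside the result-set machinery, the following is a minimal, hypothetical sketch of the same projectMapping/cloneMap loop applied to standalone ExecRow objects. ProjectionSketch and its project() helper are illustrative names rather than Derby code, and the imports assume the 10.14-era package layout.

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.execute.ExecRow;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.impl.sql.execute.ValueRow;

public class ProjectionSketch {
    // projectMapping[i] holds the 1-based source column feeding result column i+1,
    // or -1 when the projection expression supplies that column itself.
    static ExecRow project(ExecRow sourceRow, int[] projectMapping, boolean[] cloneMap)
            throws StandardException {
        ExecRow result = new ValueRow(projectMapping.length);
        for (int index = 0; index < projectMapping.length; index++) {
            if (projectMapping[index] != -1) {
                DataValueDescriptor dvd = sourceRow.getColumn(projectMapping[index]);
                // Clone only stream-backed values; others can be shared by reference.
                if (cloneMap[index] && dvd.hasStream()) {
                    dvd = dvd.cloneValue(false);
                }
                result.setColumn(index + 1, dvd);
            }
        }
        return result;
    }
}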
use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.
the class ProjectRestrictResultSet method getNextRowCore.
/**
* Return the requested values computed
* from the next row (if any) for which
* the restriction evaluates to true.
* <p>
* restriction and projection parameters
* are evaluated for each row.
*
* @exception StandardException thrown on failure.
* @exception StandardException ResultSetNotOpen thrown if not yet open.
*
* @return the next row in the result
*/
public ExecRow getNextRowCore() throws StandardException {
    if (isXplainOnlyMode())
        return null;
    ExecRow candidateRow = null;
    ExecRow result = null;
    boolean restrict = false;
    DataValueDescriptor restrictBoolean;
    long beginRT = 0;
    /* Return null if open was short circuited by false constant expression */
    if (shortCircuitOpen) {
        return result;
    }
    beginTime = getCurrentTimeMillis();
    do {
        if (validatingCheckConstraint) {
            candidateRow = null;
            while (rowLocations.hasMoreElements() && candidateRow == null) {
                DataValueDescriptor[] row = (DataValueDescriptor[]) rowLocations.nextElement();
                RowLocation rl = (RowLocation) ((SQLRef) row[0]).getObject();
                ((ValidateCheckConstraintResultSet) source).positionScanAtRowLocation(rl);
                candidateRow = source.getNextRowCore();
                // if null (deleted), we move to next
            }
        } else {
            candidateRow = source.getNextRowCore();
        }
        if (candidateRow != null) {
            beginRT = getCurrentTimeMillis();
            /* If restriction is null, then all rows qualify */
            if (restriction == null) {
                restrict = true;
            } else {
                setCurrentRow(candidateRow);
                restrictBoolean = (DataValueDescriptor) restriction.invoke(activation);
                restrictionTime += getElapsedMillis(beginRT);
                // if the result is null, we make it false --
                // so the row won't be returned.
                restrict = ((!restrictBoolean.isNull()) && restrictBoolean.getBoolean());
                if (!restrict) {
                    rowsFiltered++;
                }
            }
            /* Update the run time statistics */
            rowsSeen++;
        }
    } while ((candidateRow != null) && (!restrict));
    if (candidateRow != null) {
        beginRT = getCurrentTimeMillis();
        result = doProjection(candidateRow);
        projectionTime += getElapsedMillis(beginRT);
    } else {
        /* Clear the current row, if null */
        clearCurrentRow();
    }
    currentRow = result;
    if (runTimeStatsOn) {
        if (!isTopResultSet) {
            /* This is simply for RunTimeStats */
            /* We first need to get the subquery tracking array via the StatementContext */
            StatementContext sc = activation.getLanguageConnectionContext().getStatementContext();
            subqueryTrackingArray = sc.getSubqueryTrackingArray();
        }
        nextTime += getElapsedMillis(beginTime);
    }
    return result;
}
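One detail worth isolating is the restriction test: the generated restriction returns a DataValueDescriptor, and a SQL NULL result is treated as false so the row is filtered out. As a hedged illustration (RestrictionSketch is a hypothetical name, and the imports assume the 10.14-era package layout), the check reduces to:

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.types.SQLBoolean;

public class RestrictionSketch {
    // Mirrors the qualification test in getNextRowCore(): NULL counts as false.
    static boolean qualifies(DataValueDescriptor restrictBoolean) throws StandardException {
        return !restrictBoolean.isNull() && restrictBoolean.getBoolean();
    }

    public static void main(String[] args) throws StandardException {
        System.out.println(qualifies(new SQLBoolean(true)));  // true
        System.out.println(qualifies(new SQLBoolean(false))); // false
        System.out.println(qualifies(new SQLBoolean()));      // SQL NULL -> false
    }
}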
use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.
the class CurrentOfResultSet method getNextRowCore.
/**
* If open and not returned yet, returns the row.
*
* @exception StandardException thrown on failure.
*/
public ExecRow getNextRowCore() throws StandardException {
    if (isXplainOnlyMode())
        return null;
    if (isOpen) {
        if (!next) {
            next = true;
            if (SanityManager.DEBUG)
                SanityManager.ASSERT(!cursor.isClosed(), "cursor closed");
            ExecRow cursorRow = cursor.getCurrentRow();
            // requalify the current row
            if (cursorRow == null) {
                throw StandardException.newException(SQLState.NO_CURRENT_ROW);
            }
            // we know it will be requested, may as well get it now.
            rowLocation = cursor.getRowLocation();
            // get the row from the base table, which is the real result
            // row for the CurrentOfResultSet
            currentRow = target.getCurrentRow();
            // return null (row has been deleted under our feet).
            if (rowLocation == null || (cursorRow != null && currentRow == null)) {
                activation.addWarning(StandardException.newWarning(SQLState.CURSOR_OPERATION_CONFLICT));
                return null;
            }
            /* beetle 3865: updateable cursor using index. If underlying is a covering
             * index, target is a TableScanRS (instead of an IndexRow2BaseRowRS) for the
             * index scan. But the problem is it returns a compact row in index key order.
             * However the ProjectRestrictRS above us that sets up the old and new column
             * values expects us to return a sparse row in heap order. We have to do the
             * wiring here, since we don't have IndexRow2BaseRowRS to do this work. This
             * problem was not exposed before, because we never used index scan for updateable
             * cursors.
             */
            if (target instanceof TableScanResultSet) {
                TableScanResultSet scan = (TableScanResultSet) target;
                if (scan.indexCols != null && currentRow != null)
                    currentRow = getSparseRow(currentRow, scan.indexCols);
            }
            // REMIND: verify the row is still there
            // at present we get an ugly exception from the store,
            // Hopefully someday we can just do this:
            //
            // if (!rowLocation.rowExists())
            //     throw StandardException.newException(SQLState.LANG_NO_CURRENT_ROW, cursorName);
        } else {
            currentRow = null;
            rowLocation = null;
        }
    } else {
        currentRow = null;
        rowLocation = null;
    }
    setCurrentRow(currentRow);
    return currentRow;
}
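The beetle 3865 comment describes re-wiring a compact, index-key-ordered row into a sparse, heap-ordered row. That work is done by getSparseRow(), which is not shown in this snippet; as a rough, hypothetical sketch of the idea only, assume heapPositions gives the 1-based heap column number for each compact column and heapWidth is the width of the base-table row:

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.execute.ExecRow;
import org.apache.derby.impl.sql.execute.ValueRow;

public class SparseRowSketch {
    // Spread the columns of a compact row into their heap positions;
    // columns the index does not cover are simply left unset (null).
    static ExecRow toSparseRow(ExecRow compactRow, int[] heapPositions, int heapWidth)
            throws StandardException {
        ExecRow sparse = new ValueRow(heapWidth);
        for (int i = 0; i < heapPositions.length; i++) {
            sparse.setColumn(heapPositions[i], compactRow.getColumn(i + 1));
        }
        return sparse;
    }
}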
use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.
the class DMLWriteGeneratedColumnsResultSet method getCompactRow.
/**
* Take the input row and return a new compact ExecRow
* using the column positions provided in columnIndexes.
* Copies references, no cloning.
*/
protected ExecRow getCompactRow(ExecRow inputRow, int[] columnIndexes) throws StandardException {
    ExecRow outRow;
    int numInputCols = inputRow.nColumns();
    if (columnIndexes == null) {
        outRow = new ValueRow(numInputCols);
        Object[] src = inputRow.getRowArray();
        Object[] dst = outRow.getRowArray();
        System.arraycopy(src, 0, dst, 0, src.length);
        return outRow;
    }
    int numOutputCols = columnIndexes.length;
    outRow = new ValueRow(numOutputCols);
    for (int i = 0; i < numOutputCols; i++) {
        outRow.setColumn(i + 1, inputRow.getColumn(columnIndexes[i]));
    }
    return outRow;
}
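A small, hypothetical usage example of the same compacting idea: build a three-column ValueRow and keep only columns 3 and 1, by reference and in that order. CompactRowSketch is an illustrative name, and the imports assume the 10.14-era package layout.

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.execute.ExecRow;
import org.apache.derby.iapi.types.SQLInteger;
import org.apache.derby.iapi.types.SQLVarchar;
import org.apache.derby.impl.sql.execute.ValueRow;

public class CompactRowSketch {
    public static void main(String[] args) throws StandardException {
        ExecRow input = new ValueRow(3);
        input.setColumn(1, new SQLInteger(42));
        input.setColumn(2, new SQLVarchar("dropped"));
        input.setColumn(3, new SQLVarchar("kept"));

        int[] columnIndexes = { 3, 1 };  // 1-based positions to keep
        ExecRow out = new ValueRow(columnIndexes.length);
        for (int i = 0; i < columnIndexes.length; i++) {
            // copy references, no cloning, as in getCompactRow()
            out.setColumn(i + 1, input.getColumn(columnIndexes[i]));
        }
        System.out.println(out.getColumn(1).getString()); // kept
        System.out.println(out.getColumn(2).getInt());    // 42
    }
}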
use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.
the class GroupedAggregateResultSet method getNextRowCore.
/**
* Return the next row.
*
* @exception StandardException thrown on failure.
* @exception StandardException ResultSetNotOpen thrown if not yet open.
*
* @return the next row in the result
*/
public ExecRow getNextRowCore() throws StandardException {
    if (isXplainOnlyMode())
        return null;
    if (!isOpen) {
        return null;
    }
    beginTime = getCurrentTimeMillis();
    if (finishedResults.size() > 0)
        return makeCurrent(finishedResults.remove(0));
    else if (resultsComplete)
        return null;
    ExecIndexRow nextRow = getNextRowFromRS();
    // No rows, no work to do
    if (nextRow == null)
        return finalizeResults();
    // result row from the sorter is complete and ready to return:
    if (usingAggregateObserver)
        return finishAggregation(nextRow);
    /* Drain and merge rows until we find new distinct values for the grouping columns. */
    while (nextRow != null) {
        /* We found a new set of values for the grouping columns.
         * Update the current row and return this group.
         *
         * Note that in the case of GROUP BY ROLLUP,
         * there may be more than one level of grouped
         * aggregates which is now complete. We can
         * only return 1, and the other completed
         * groups are held in finishedResults until
         * our caller calls getNextRowCore() again to
         * get the next level of results.
         */
        ExecIndexRow currSortedRow = resultRows[resultRows.length - 1];
        ExecRow origRow = nextRow.getClone();
        initializeVectorAggregation(nextRow);
        int distinguisherCol = sameGroupingValues(currSortedRow, nextRow);
        for (int r = 0; r < resultRows.length; r++) {
            boolean sameGroup = (rollup ? r <= distinguisherCol : distinguisherCol == numGCols());
            if (sameGroup) {
                /* Same group - initialize the new
                 * row and then merge the aggregates */
                // initializeVectorAggregation(nextRow);
                mergeVectorAggregates(nextRow, resultRows[r], r);
            } else {
                setRollupColumnsToNull(resultRows[r], r);
                finishedResults.add(finishAggregation(resultRows[r]));
                /* Save a clone of the new row so
                 * that it doesn't get overwritten */
                resultRows[r] = (ExecIndexRow) origRow.getClone();
                initializeVectorAggregation(resultRows[r]);
                initializeDistinctMaps(r, false);
            }
        }
        if (finishedResults.size() > 0) {
            nextTime += getElapsedMillis(beginTime);
            rowsReturned++;
            return makeCurrent(finishedResults.remove(0));
        }
        // Get the next row
        nextRow = getNextRowFromRS();
    }
    return finalizeResults();
}
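The group-change detection relies on sameGroupingValues(), which is not shown in this snippet. Purely as a hedged illustration of how such a distinguisher could be computed, the sketch below compares the leading grouping columns of two rows with DataValueDescriptor.compare() and returns how many of them agree; the class name, method name, and return convention are assumptions, not the actual Derby implementation.

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.execute.ExecRow;

public class GroupingSketch {
    // Count how many leading grouping columns two rows agree on.
    // Agreement on all of them means the rows fall in the same (finest) group.
    static int leadingGroupingColumnsInCommon(ExecRow prev, ExecRow next, int numGroupingCols)
            throws StandardException {
        for (int col = 1; col <= numGroupingCols; col++) {
            if (prev.getColumn(col).compare(next.getColumn(col)) != 0) {
                return col - 1; // columns 1 .. col-1 matched
            }
        }
        return numGroupingCols;
    }
}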