use of org.apache.derby.iapi.sql.execute.ExecIndexRow in project derby by apache.
the class DistinctScalarAggregateResultSet method loadSorter.
// /////////////////////////////////////////////////////////////////////////////
//
// MISC UTILITIES
//
// /////////////////////////////////////////////////////////////////////////////
/**
* Load up the sorter. Feed it every row from the
* source scan. If we have a vector aggregate, initialize
* the aggregator for each source row. When done, close
* the source scan and open the sort. Return the sort
* scan controller.
*
* @exception StandardException thrown on failure.
*
* @return the sort controller
*/
private ScanController loadSorter() throws StandardException {
SortController sorter;
ExecRow sourceRow;
ExecIndexRow sortTemplateRow = getRowTemplate();
int inputRowCountEstimate = (int) optimizerEstimatedRowCount;
TransactionController tc = getTransactionController();
/*
** We have a distinct aggregate, so we'll need
** to do a sort. We use all of the sorting columns and
** drop the aggregation on the distinct column. Then
** we'll feed this into the sorter again w/o the distinct
** column in the ordering list.
*/
GenericAggregator[] aggsNoDistinct = getSortAggregators(aggInfoList, true, activation.getLanguageConnectionContext(), source);
SortObserver sortObserver = new AggregateSortObserver(true, aggsNoDistinct, aggregates, sortTemplateRow);
sortId = tc.createSort((Properties) null, sortTemplateRow.getRowArray(), order, sortObserver,
false, // not already in order
inputRowCountEstimate, // est rows, -1 means no idea
maxRowSize); // est row size
sorter = tc.openSort(sortId);
dropDistinctAggSort = true;
while ((sourceRow = source.getNextRowCore()) != null) {
sorter.insert(sourceRow.getRowArray());
rowsInput++;
}
/*
** End the sort and open up the result set
*/
sorter.completedInserts();
scanController = tc.openSortScan(sortId, activation.getResultSetHoldability());
/*
** Aggs are initialized and input rows
** are in order.
*/
inputRowCountEstimate = rowsInput;
return scanController;
}
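The method above follows the storage layer's sort lifecycle: createSort, openSort, insert every row, completedInserts, then openSortScan to read the rows back in order. Here is a minimal sketch of that lifecycle on its own; sortRows is a hypothetical helper, not a Derby method, and the row source is assumed to be supplied by the caller.
import java.util.Properties;
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.store.access.ColumnOrdering;
import org.apache.derby.iapi.store.access.ScanController;
import org.apache.derby.iapi.store.access.SortController;
import org.apache.derby.iapi.store.access.SortObserver;
import org.apache.derby.iapi.store.access.TransactionController;
import org.apache.derby.iapi.types.DataValueDescriptor;
// Hypothetical helper, not part of Derby: the createSort/openSort/
// completedInserts/openSortScan lifecycle that loadSorter() follows.
static ScanController sortRows(TransactionController tc, DataValueDescriptor[] template,
        ColumnOrdering[] order, SortObserver observer,
        Iterable<DataValueDescriptor[]> rows, long estimatedRows, int maxRowSize,
        boolean holdable) throws StandardException {
    long sortId = tc.createSort((Properties) null, template, order, observer,
            false,          // rows are not already in order
            estimatedRows,  // -1 means no estimate
            maxRowSize);    // estimated row size
    SortController sorter = tc.openSort(sortId);
    for (DataValueDescriptor[] row : rows) {
        sorter.insert(row); // the observer sees (and may aggregate) each row
    }
    sorter.completedInserts();                // end the insert phase
    return tc.openSortScan(sortId, holdable); // rows now come back in order
}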
use of org.apache.derby.iapi.sql.execute.ExecIndexRow in project derby by apache.
the class DistinctScalarAggregateResultSet method getNextRowCore.
/* RESOLVE - THIS NEXT METHOD IS ONLY INCLUDED BECAUSE OF A JIT ERROR. THERE IS NO OTHER
* REASON TO OVERRIDE IT IN DistinctScalarAggregateResultSet. THE BUG WAS FOUND IN
* 1.1.6 WITH THE JIT.
*/
/**
* Return the next row. For a scalar aggregate, this drains the
* source scan and accumulates every row into a single result row.
*
* @exception StandardException thrown on failure.
* @exception StandardException ResultSetNotOpen thrown if not yet open.
*
* @return the next row in the result
*/
public ExecRow getNextRowCore() throws StandardException {
if (isXplainOnlyMode())
return null;
ExecIndexRow execIndexRow = null;
ExecIndexRow aggResult = null;
boolean cloneArg = true;
beginTime = getCurrentTimeMillis();
if (isOpen) {
/*
** We are dealing with a scalar aggregate.
** Zip through each row and accumulate.
** Accumulate into the first row. Only
** the first row is cloned.
*/
while ((execIndexRow = getRowFromResultSet(cloneArg)) != null) {
/*
** Use a clone of the first row as our result.
** We need to get a clone since we will be reusing
** the original as the wrapper of the source row.
** Turn cloning off since we won't be keeping any
** other rows.
*/
if (aggResult == null) {
cloneArg = false;
aggResult = (ExecIndexRow) execIndexRow.getClone();
} else {
/*
** Accumulate all aggregates. For the distinct
** aggregates, we'll be accumulating, for the nondistinct
** we'll be merging.
*/
accumulateScalarAggregation(execIndexRow, aggResult, true);
}
}
/*
** If we have aggregates, we need to generate a
** value for them now. Only finish the aggregation
** if we haven't yet (i.e. if countOfRows == 0).
** If there weren't any input rows, we'll allocate
** one here.
*/
if (countOfRows == 0) {
aggResult = finishAggregation(aggResult);
setCurrentRow(aggResult);
countOfRows++;
}
}
nextTime += getElapsedMillis(beginTime);
return aggResult;
}
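Stripped of the Derby types, the loop above is an ordinary fold: only the first row is cloned (it becomes the accumulator), and every later row is merged into it in place. A hypothetical illustration, with made-up names (accumulateAll, cloneRow, mergeInto):
import java.util.Iterator;
import java.util.function.BinaryOperator;
import java.util.function.UnaryOperator;
// Hypothetical illustration, not Derby code: clone the first row into an
// accumulator, then merge every subsequent row into it in place.
static <R> R accumulateAll(Iterator<R> rows, UnaryOperator<R> cloneRow,
        BinaryOperator<R> mergeInto) {
    R result = null;
    while (rows.hasNext()) {
        R next = rows.next();
        result = (result == null)
                ? cloneRow.apply(next)           // clone only the first row
                : mergeInto.apply(result, next); // accumulate the rest
    }
    return result;
}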
use of org.apache.derby.iapi.sql.execute.ExecIndexRow in project derby by apache.
the class GroupedAggregateResultSet method loadSorter.
/**
* Load up the sorter. Feed it every row from the
* source scan. When done, close
* the source scan and open the sort. Return the sort
* scan controller.
*
* @exception StandardException thrown on failure.
*
* @return the sort controller
*/
private ScanController loadSorter() throws StandardException {
SortController sorter;
ExecRow inputRow;
int inputRowCountEstimate = (int) optimizerEstimatedRowCount;
ExecIndexRow sortTemplateRow = getRowTemplate();
tc = getTransactionController();
SortObserver observer;
if (usingAggregateObserver)
observer = new AggregateSortObserver(true, aggregates, aggregates, sortTemplateRow);
else
observer = new BasicSortObserver(true, false, sortTemplateRow, true);
genericSortId = tc.createSort((Properties) null, sortTemplateRow.getRowArray(), order, observer,
false, // not already in order
inputRowCountEstimate, // est rows
maxRowSize); // est row size
sorter = tc.openSort(genericSortId);
/* The sorter is responsible for doing the cloning */
while ((inputRow = getNextRowFromRS()) != null) {
sorter.insert(inputRow.getRowArray());
}
source.close();
sorter.completedInserts();
sortProperties = sorter.getSortInfo().getAllSortInfo(sortProperties);
if (aggInfoList.hasDistinct()) {
/*
** If there was a distinct aggregate, then that column
** was automatically included as the last column in
** the sort ordering. But we don't want it to be part
** of the ordering anymore, because we aren't grouping
** by that column, we just sorted it so that distinct
** aggregation would see the values in order.
*/
// Although it seems like N aggs could have been
// added at the end, in fact only one has been.
// FIXME -- need to get GroupByNode to handle this
// correctly, but that requires understanding
// scalar distinct aggregates.
numDistinctAggs = 1;
}
return tc.openSortScan(genericSortId, activation.getResultSetHoldability());
}
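Callers consume the returned ScanController by fetching each sorted row into a reusable template. A minimal sketch of that read-back loop; drainSortScan is a hypothetical helper, not part of Derby.
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.store.access.ScanController;
import org.apache.derby.iapi.types.DataValueDescriptor;
// Hypothetical consumer, not part of Derby: read the sorted rows back.
static void drainSortScan(ScanController scan, DataValueDescriptor[] template)
        throws StandardException {
    while (scan.fetchNext(template)) {
        // each iteration leaves the next row, in sort order, in 'template'
    }
    scan.close();
}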
use of org.apache.derby.iapi.sql.execute.ExecIndexRow in project derby by apache.
the class SYSFOREIGNKEYSRowFactory method makeRow.
// ///////////////////////////////////////////////////////////////////////////
//
// METHODS
//
// ///////////////////////////////////////////////////////////////////////////
/**
* Make a SYSFOREIGNKEYS row
*
* @return Row suitable for inserting into SYSFOREIGNKEYS.
*
* @exception StandardException thrown on failure
*/
public ExecRow makeRow(TupleDescriptor td, TupleDescriptor parent) throws StandardException {
DataValueDescriptor col;
ExecIndexRow row;
String constraintId = null;
String keyConstraintId = null;
String conglomId = null;
String raDeleteRule = "N";
String raUpdateRule = "N";
if (td != null) {
ForeignKeyConstraintDescriptor cd = (ForeignKeyConstraintDescriptor) td;
constraintId = cd.getUUID().toString();
ReferencedKeyConstraintDescriptor refCd = cd.getReferencedConstraint();
if (SanityManager.DEBUG) {
SanityManager.ASSERT(refCd != null, "this fk returned a null referenced key");
}
keyConstraintId = refCd.getUUID().toString();
conglomId = cd.getIndexUUIDString();
raDeleteRule = getRefActionAsString(cd.getRaDeleteRule());
raUpdateRule = getRefActionAsString(cd.getRaUpdateRule());
}
/* Build the row */
row = getExecutionFactory().getIndexableRow(SYSFOREIGNKEYS_COLUMN_COUNT);
/* 1st column is CONSTRAINTID (UUID - char(36)) */
row.setColumn(SYSFOREIGNKEYS_CONSTRAINTID, new SQLChar(constraintId));
/* 2nd column is CONGLOMERATEID (UUID - char(36)) */
row.setColumn(SYSFOREIGNKEYS_CONGLOMERATEID, new SQLChar(conglomId));
/* 3rd column is KEYCONSTRAINTID (UUID - char(36)) */
row.setColumn(SYSFOREIGNKEYS_KEYCONSTRAINTID, new SQLChar(keyConstraintId));
// DELETERULE and UPDATERULE hold the referential action as a single character; "N" (no action) is the default
/* 4th column is DELETERULE char(1) */
row.setColumn(SYSFOREIGNKEYS_DELETERULE, new SQLChar(raDeleteRule));
/* 5th column is UPDATERULE char(1) */
row.setColumn(SYSFOREIGNKEYS_UPDATERULE, new SQLChar(raUpdateRule));
return row;
}
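The makeRow pattern is the same across the catalog row factories: allocate an indexable row sized to the catalog's column count, then fill the 1-based columns with DataValueDescriptor wrappers. A hypothetical sketch of just that pattern; buildCharRow is a made-up helper, not a Derby method.
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.execute.ExecIndexRow;
import org.apache.derby.iapi.sql.execute.ExecutionFactory;
import org.apache.derby.iapi.types.SQLChar;
// Hypothetical helper, not part of Derby: build an indexable row whose
// columns are all SQLChar values, as SYSFOREIGNKEYSRowFactory.makeRow does.
static ExecIndexRow buildCharRow(ExecutionFactory ef, String... values)
        throws StandardException {
    ExecIndexRow row = ef.getIndexableRow(values.length);
    for (int i = 0; i < values.length; i++) {
        row.setColumn(i + 1, new SQLChar(values[i])); // columns are 1-based
    }
    return row;
}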
use of org.apache.derby.iapi.sql.execute.ExecIndexRow in project derby by apache.
the class SYSPERMSRowFactory method buildIndexKeyRow.
/**
* Builds an index key row for the given index number.
*/
public ExecIndexRow buildIndexKeyRow(int indexNumber, PermissionsDescriptor perm) throws StandardException {
ExecIndexRow row = null;
switch(indexNumber) {
case GRANTEE_OBJECTID_GRANTOR_INDEX_NUM:
// RESOLVE We do not support the FOR GRANT OPTION, so generic permission rows are unique on the
// grantee and object UUID columns. The grantor column will always have the name of the owner of the
// object. So the index key, used for searching the index, only has grantee and object UUID columns.
// It does not have a grantor column.
row = getExecutionFactory().getIndexableRow(2);
row.setColumn(1, getAuthorizationID(perm.getGrantee()));
String protectedObjectsIDStr = ((PermDescriptor) perm).getPermObjectId().toString();
row.setColumn(2, new SQLChar(protectedObjectsIDStr));
break;
case PERMS_UUID_IDX_NUM:
row = getExecutionFactory().getIndexableRow(1);
String permUUIDStr = ((PermDescriptor) perm).getUUID().toString();
row.setColumn(1, new SQLChar(permUUIDStr));
break;
}
return row;
}
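Callers pass an index number and a descriptor and get back a key row sized for that index. A hypothetical usage sketch for the UUID branch; rowFactory and perm are assumed to be in scope, and PERMS_UUID_IDX_NUM is assumed visible to the caller.
// Hypothetical usage, not from the Derby sources: the returned key has
// exactly one column, the permission row's UUID string.
ExecIndexRow keyRow = rowFactory.buildIndexKeyRow(
        SYSPERMSRowFactory.PERMS_UUID_IDX_NUM, perm);
// keyRow.nColumns() == 1; it can position a scan of the SYSPERMS UUID index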