Use of org.apache.derby.iapi.store.access.TransactionController in project derby by apache.
The class DistinctScalarAggregateResultSet, method loadSorter.
// /////////////////////////////////////////////////////////////////////////////
//
// MISC UTILITIES
//
// /////////////////////////////////////////////////////////////////////////////
/**
* Load up the sorter. Feed it every row from the
* source scan. If we have a vector aggregate, initialize
* the aggregator for each source row. When done, close
* the source scan and open the sort. Return the sort
* scan controller.
*
* @exception StandardException thrown on failure.
*
* @return the sort controller
*/
private ScanController loadSorter() throws StandardException {
    SortController sorter;
    ExecRow sourceRow;
    ExecIndexRow sortTemplateRow = getRowTemplate();
    int inputRowCountEstimate = (int) optimizerEstimatedRowCount;
    TransactionController tc = getTransactionController();
    /*
    ** We have a distinct aggregate, so we'll need to do a sort.
    ** We use all of the sorting columns and drop the aggregation
    ** on the distinct column. Then we'll feed this into the sorter
    ** again w/o the distinct column in the ordering list.
    */
    GenericAggregator[] aggsNoDistinct = getSortAggregators(aggInfoList, true,
            activation.getLanguageConnectionContext(), source);
    SortObserver sortObserver = new AggregateSortObserver(true, aggsNoDistinct,
            aggregates, sortTemplateRow);
    sortId = tc.createSort((Properties) null,
            sortTemplateRow.getRowArray(),
            order,
            sortObserver,
            false,                  // input is not already in order
            inputRowCountEstimate,  // estimated rows, -1 means no idea
            maxRowSize);            // estimated row size
    sorter = tc.openSort(sortId);
    dropDistinctAggSort = true;
    while ((sourceRow = source.getNextRowCore()) != null) {
        sorter.insert(sourceRow.getRowArray());
        rowsInput++;
    }
    /*
    ** End the sort and open up the result set
    */
    sorter.completedInserts();
    scanController = tc.openSortScan(sortId, activation.getResultSetHoldability());
    /*
    ** Aggs are initialized and input rows are in order.
    */
    inputRowCountEstimate = rowsInput;
    return scanController;
}
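For reference, the sort lifecycle above reduces to a fixed call sequence against TransactionController: create the sort, open it, drain the source into it, finish the inserts, then scan the sorted output. The following is a minimal sketch of that sequence, not Derby source: the helper name sortAllRows and its parameter list are invented for illustration, and only the calls visible in loadSorter are used.

import java.util.Properties;

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.execute.ExecRow;
import org.apache.derby.iapi.sql.execute.NoPutResultSet;
import org.apache.derby.iapi.store.access.ColumnOrdering;
import org.apache.derby.iapi.store.access.ScanController;
import org.apache.derby.iapi.store.access.SortController;
import org.apache.derby.iapi.store.access.SortObserver;
import org.apache.derby.iapi.store.access.TransactionController;

class SortLifecycleSketch {
    /**
     * Create a sort, drain every row of the source into it, finish the
     * inserts, and hand back a scan over the sorted result.
     */
    static ScanController sortAllRows(TransactionController tc,
                                      NoPutResultSet source,
                                      ExecRow templateRow,
                                      ColumnOrdering[] order,
                                      SortObserver observer,
                                      int estimatedRowCount,
                                      int maxRowSize,
                                      boolean holdability) throws StandardException {
        long sortId = tc.createSort((Properties) null,
                templateRow.getRowArray(),  // template describing the row shape
                order,                      // sort key ordering
                observer,                   // sees every inserted row
                false,                      // input is not already in order
                estimatedRowCount,          // -1 means no idea
                maxRowSize);                // estimated row size
        SortController sorter = tc.openSort(sortId);
        ExecRow row;
        while ((row = source.getNextRowCore()) != null) {
            sorter.insert(row.getRowArray());
        }
        sorter.completedInserts();          // no more input; the sort can finish
        return tc.openSortScan(sortId, holdability);
    }
}

The caller remains responsible for eventually dropping the sort (note the dropDistinctAggSort flag above) and for closing the returned scan.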
Use of org.apache.derby.iapi.store.access.TransactionController in project derby by apache.
The class HashScanResultSet, method reopenCore.
/**
* Reopen this ResultSet.
*
* @exception StandardException thrown if cursor finished.
*/
public void reopenCore() throws StandardException {
    TransactionController tc;

    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(isOpen, "HashScanResultSet not open, cannot reopen");
    }
    beginTime = getCurrentTimeMillis();
    resetProbeVariables();
    numOpens++;
    openTime += getElapsedMillis(beginTime);
}
Use of org.apache.derby.iapi.store.access.TransactionController in project derby by apache.
The class HashTableResultSet, method openCore.
//
// NoPutResultSet interface
//
/**
* Open a scan on the table. Scan parameters are evaluated
* at each open, so there is probably some way of altering
* their values...
*
* @exception StandardException thrown if cursor finished.
*/
public void openCore() throws StandardException {
    TransactionController tc;

    beginTime = getCurrentTimeMillis();
    // - sometimes get NullPointerException in openCore().
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(source != null,
                "HTRS().openCore(), source expected to be non-null");
    }
    // REVISIT: through the direct DB API, this needs to be an
    // error, not an ASSERT; users can open twice. Only through JDBC
    // is access to open controlled and ensured valid.
    if (SanityManager.DEBUG)
        SanityManager.ASSERT(!isOpen, "HashTableResultSet already open");
    // Get the current transaction controller
    tc = activation.getTransactionController();
    if (!hashTableBuilt) {
        source.openCore();
        /* Create and populate the hash table. We pass
         * ourself in as the row source. This allows us
         * to apply the single table predicates to the
         * rows coming from our child as we build the
         * hash table.
         */
        ht = new BackingStoreHashtable(tc, this, keyColumns, removeDuplicates,
                (int) optimizerEstimatedRowCount, maxInMemoryRowCount,
                (int) initialCapacity, loadFactor, skipNullKeyColumns, false);
        if (runTimeStatsOn) {
            hashtableSize = ht.size();
            if (scanProperties == null) {
                scanProperties = new Properties();
            }
            try {
                if (ht != null) {
                    ht.getAllRuntimeStats(scanProperties);
                }
            } catch (StandardException se) {
                // ignore
            }
        }
        isOpen = true;
        hashTableBuilt = true;
    }
    resetProbeVariables();
    numOpens++;
    openTime += getElapsedMillis(beginTime);
}
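The BackingStoreHashtable constructor above takes ten positional arguments, which makes call sites hard to read. Below is a hedged annotation of that call: the wrapper name buildHashTable is invented, and the per-argument comments are inferred from the argument names visible in the snippet rather than taken from the class's javadoc.

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.store.access.BackingStoreHashtable;
import org.apache.derby.iapi.store.access.RowSource;
import org.apache.derby.iapi.store.access.TransactionController;

class HashTableBuildSketch {
    /** Drain a row source into a hash table keyed on keyColumns. */
    static BackingStoreHashtable buildHashTable(TransactionController tc,
                                                RowSource rows,
                                                int[] keyColumns,
                                                boolean removeDuplicates,
                                                long estimatedRowCount,
                                                long maxInMemoryRowCount,
                                                int initialCapacity,
                                                float loadFactor,
                                                boolean skipNullKeyColumns)
            throws StandardException {
        return new BackingStoreHashtable(
                tc,                   // transaction used if the table spills to disk
                rows,                 // source rows, consumed while building
                keyColumns,           // columns that form the hash key
                removeDuplicates,     // whether duplicate keys are collapsed
                estimatedRowCount,    // sizing hint from the optimizer
                maxInMemoryRowCount,  // rows kept in memory before spilling
                initialCapacity,      // as in java.util.Hashtable
                loadFactor,           // as in java.util.Hashtable
                skipNullKeyColumns,   // whether rows with null keys are skipped
                false);               // last flag is false in the snippet above
    }
}

Note how openCore builds the table only once (guarded by hashTableBuilt); subsequent reopens reuse it and merely reset the probe variables.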
Use of org.apache.derby.iapi.store.access.TransactionController in project derby by apache.
The class DDLConstantAction, method getSchemaDescriptorForCreate.
/**
* Get the schema descriptor to use when creating an object in the
* passed-in schema, creating that schema first if it does not yet exist.
*
* @param dd the data dictionary
* @param activation activation
* @param schemaName name of the schema
*
* @return the schema descriptor
*
* @exception StandardException thrown on failure
*/
static SchemaDescriptor getSchemaDescriptorForCreate(DataDictionary dd,
        Activation activation, String schemaName) throws StandardException {
    TransactionController tc =
            activation.getLanguageConnectionContext().getTransactionExecute();
    SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc, false);
    if (sd == null || sd.getUUID() == null) {
        CreateSchemaConstantAction csca =
                new CreateSchemaConstantAction(schemaName, (String) null);
        if (activation.getLanguageConnectionContext()
                .isInitialDefaultSchema(schemaName)) {
            // DERBY-48: This operation creates the user's initial
            // default schema and we don't want to hold a lock for
            // SYSSCHEMAS for the duration of the user transaction
            // since connection attempts may block, so we perform
            // the creation in a nested transaction (if possible)
            // so we can commit at once and release locks.
            executeCAPreferSubTrans(csca, tc, activation);
        } else {
            // create the schema in the user transaction
            try {
                csca.executeConstantAction(activation);
            } catch (StandardException se) {
                if (se.getMessageId().equals(SQLState.LANG_OBJECT_ALREADY_EXISTS)) {
                    // Ignore "Schema already exists". Another thread has
                    // probably created it after we checked for it.
                } else {
                    throw se;
                }
            }
        }
        sd = dd.getSchemaDescriptor(schemaName, tc, true);
    }
    return sd;
}
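Stripped of the Derby types, the method is a check-create-recheck: look up the schema, create it if absent, treat a concurrent "already exists" failure as success, and re-read. A generic sketch of that shape, using a hypothetical Catalog API invented purely for illustration:

// Hypothetical catalog API, only to illustrate the pattern.
interface Catalog {
    Schema lookup(String name);                        // null if absent
    void create(String name) throws SchemaExistsException;
}

interface Schema { }

class SchemaExistsException extends Exception { }

class EnsureSchemaSketch {
    static Schema ensureSchema(Catalog catalog, String name) {
        Schema s = catalog.lookup(name);
        if (s == null) {
            try {
                catalog.create(name);
            } catch (SchemaExistsException benignRace) {
                // Another transaction created the schema between our lookup
                // and our create; that is exactly the outcome we wanted.
                // (The Derby code above makes the same call, checking for
                // LANG_OBJECT_ALREADY_EXISTS and rethrowing anything else.)
            }
            s = catalog.lookup(name); // must exist now
        }
        return s;
    }
}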
Use of org.apache.derby.iapi.store.access.TransactionController in project derby by apache.
The class DropConstraintConstantAction, method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for DROP CONSTRAINT.
*
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
    ConstraintDescriptor conDesc = null;
    TableDescriptor td;
    UUID indexId = null;
    String indexUUIDString;

    LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    DataDictionary dd = lcc.getDataDictionary();
    DependencyManager dm = dd.getDependencyManager();
    TransactionController tc = lcc.getTransactionExecute();
    /*
    ** Inform the data dictionary that we are about to write to it.
    ** There are several calls to data dictionary "get" methods here
    ** that might be done in "read" mode in the data dictionary, but
    ** it seemed safer to do this whole operation in "write" mode.
    **
    ** We tell the data dictionary we're done writing at the end of
    ** the transaction.
    */
    dd.startWriting(lcc);
    td = dd.getTableDescriptor(tableId);
    if (td == null) {
        throw StandardException.newException(
                SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tableName);
    }
    /* Table gets locked in AlterTableConstantAction */
    /*
    ** If the schema descriptor is null, then we must have just
    ** read ourselves in. So we will get the corresponding schema
    ** descriptor from the data dictionary.
    */
    SchemaDescriptor tdSd = td.getSchemaDescriptor();
    SchemaDescriptor constraintSd = constraintSchemaName == null
            ? tdSd
            : dd.getSchemaDescriptor(constraintSchemaName, tc, true);
    /* Get the constraint descriptor for the index, along
     * with an exclusive row lock on the row in sys.sysconstraints
     * in order to ensure that no one else compiles against the
     * index.
     */
    if (constraintName == null)  // this means "alter table drop primary key"
        conDesc = dd.getConstraintDescriptors(td).getPrimaryKey();
    else
        conDesc = dd.getConstraintDescriptorByName(td, constraintSd, constraintName, true);
    // Error if constraint doesn't exist
    if (conDesc == null) {
        String errorName = constraintName == null
                ? "PRIMARY KEY"
                : (constraintSd.getSchemaName() + "." + constraintName);
        throw StandardException.newException(
                SQLState.LANG_DROP_OR_ALTER_NON_EXISTING_CONSTRAINT,
                errorName, td.getQualifiedName());
    }
    switch (verifyType) {
        case DataDictionary.UNIQUE_CONSTRAINT:
            if (conDesc.getConstraintType() != verifyType)
                throw StandardException.newException(
                        SQLState.LANG_DROP_CONSTRAINT_TYPE, constraintName, "UNIQUE");
            break;
        case DataDictionary.CHECK_CONSTRAINT:
            if (conDesc.getConstraintType() != verifyType)
                throw StandardException.newException(
                        SQLState.LANG_DROP_CONSTRAINT_TYPE, constraintName, "CHECK");
            break;
        case DataDictionary.FOREIGNKEY_CONSTRAINT:
            if (conDesc.getConstraintType() != verifyType)
                throw StandardException.newException(
                        SQLState.LANG_DROP_CONSTRAINT_TYPE, constraintName, "FOREIGN KEY");
            break;
    }
    boolean cascadeOnRefKey =
            (cascade && conDesc instanceof ReferencedKeyConstraintDescriptor);
    if (!cascadeOnRefKey) {
        dm.invalidateFor(conDesc, DependencyManager.DROP_CONSTRAINT, lcc);
    }
    /*
    ** If we had a primary/unique key and it is drop cascade,
    ** drop all the referencing keys now. We MUST do this AFTER
    ** dropping the referenced key because otherwise we would
    ** be repeatedly changing the reference count of the referenced
    ** key and generating unnecessary I/O.
    */
    dropConstraint(conDesc, activation, lcc, !cascadeOnRefKey);
    if (cascadeOnRefKey) {
        ForeignKeyConstraintDescriptor fkcd;
        ReferencedKeyConstraintDescriptor cd;
        ConstraintDescriptorList cdl;

        cd = (ReferencedKeyConstraintDescriptor) conDesc;
        cdl = cd.getForeignKeyConstraints(ReferencedKeyConstraintDescriptor.ALL);
        int cdlSize = cdl.size();
        for (int index = 0; index < cdlSize; index++) {
            fkcd = (ForeignKeyConstraintDescriptor) cdl.elementAt(index);
            dm.invalidateFor(fkcd, DependencyManager.DROP_CONSTRAINT, lcc);
            dropConstraint(fkcd, activation, lcc, true);
        }
        /*
        ** We told dropConstraintAndIndex not to remove our
        ** dependencies, so send an invalidate, and drop the
        ** dependencies.
        */
        dm.invalidateFor(conDesc, DependencyManager.DROP_CONSTRAINT, lcc);
        dm.clearDependencies(lcc, conDesc);
    }
}
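At the SQL level, this constant action runs for ALTER TABLE ... DROP CONSTRAINT. A minimal embedded-Derby usage example; the database, table, and constraint names are made up, and derby.jar (the embedded driver) is assumed to be on the classpath:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropConstraintDemo {
    public static void main(String[] args) throws Exception {
        // In-memory database; any Derby connection works the same way.
        try (Connection c = DriverManager.getConnection(
                     "jdbc:derby:memory:demo;create=true");
             Statement s = c.createStatement()) {
            s.executeUpdate("CREATE TABLE parent(id INT NOT NULL, "
                    + "CONSTRAINT pk_parent PRIMARY KEY (id))");
            s.executeUpdate("CREATE TABLE child(pid INT, "
                    + "CONSTRAINT fk_child FOREIGN KEY (pid) REFERENCES parent(id))");
            // Dropping the foreign key first means the primary key has no
            // referencing constraints left when it is dropped.
            s.executeUpdate("ALTER TABLE child DROP CONSTRAINT fk_child");
            s.executeUpdate("ALTER TABLE parent DROP CONSTRAINT pk_parent");
        }
    }
}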