Use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
The class FromBaseTable, method verifyProperties.
/**
* @see org.apache.derby.iapi.sql.compile.Optimizable#verifyProperties
* @exception StandardException Thrown on error
*/
@Override
public void verifyProperties(DataDictionary dDictionary) throws StandardException {
if (tableProperties == null) {
return;
}
/* Check here for:
* invalid properties key
* index and constraint properties
* non-existent index
* non-existent constraint
* invalid joinStrategy
* invalid value for hashInitialCapacity
* invalid value for hashLoadFactor
* invalid value for hashMaxCapacity
*/
boolean indexSpecified = false;
boolean constraintSpecified = false;
ConstraintDescriptor consDesc = null;
Enumeration<?> e = tableProperties.keys();
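// NOTE: the boolean returned by the SQLEqualsIgnoreCase call below is
// discarded, so the statement has no effect; it appears to be leftover code.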
StringUtil.SQLEqualsIgnoreCase(tableDescriptor.getSchemaName(), "SYS");
while (e.hasMoreElements()) {
String key = (String) e.nextElement();
String value = (String) tableProperties.get(key);
if (key.equals("index")) {
// User only allowed to specify 1 of index and constraint, not both
if (constraintSpecified) {
throw StandardException.newException(SQLState.LANG_BOTH_FORCE_INDEX_AND_CONSTRAINT_SPECIFIED, getBaseTableName());
}
indexSpecified = true;
/* Validate index name - NULL means table scan */
if (!StringUtil.SQLToUpperCase(value).equals("NULL")) {
ConglomerateDescriptor cd = null;
ConglomerateDescriptor[] cds = tableDescriptor.getConglomerateDescriptors();
for (int index = 0; index < cds.length; index++) {
cd = cds[index];
String conglomerateName = cd.getConglomerateName();
if (conglomerateName != null) {
if (conglomerateName.equals(value)) {
break;
}
}
// Not a match, clear cd
cd = null;
}
// Throw exception if user specified index not found
if (cd == null) {
throw StandardException.newException(SQLState.LANG_INVALID_FORCED_INDEX1, value, getBaseTableName());
}
/* Query is dependent on the ConglomerateDescriptor */
getCompilerContext().createDependency(cd);
}
} else if (key.equals("constraint")) {
// User only allowed to specify 1 of index and constraint, not both
if (indexSpecified) {
throw StandardException.newException(SQLState.LANG_BOTH_FORCE_INDEX_AND_CONSTRAINT_SPECIFIED, getBaseTableName());
}
constraintSpecified = true;
if (!StringUtil.SQLToUpperCase(value).equals("NULL")) {
consDesc = dDictionary.getConstraintDescriptorByName(tableDescriptor, (SchemaDescriptor) null, value, false);
/* Throw exception if user specified constraint not found
* or if it does not have a backing index.
*/
if ((consDesc == null) || !consDesc.hasBackingIndex()) {
throw StandardException.newException(SQLState.LANG_INVALID_FORCED_INDEX2, value, getBaseTableName());
}
/* Query is dependent on the ConstraintDescriptor */
getCompilerContext().createDependency(consDesc);
}
} else if (key.equals("joinStrategy")) {
userSpecifiedJoinStrategy = StringUtil.SQLToUpperCase(value);
} else if (key.equals("hashInitialCapacity")) {
initialCapacity = getIntProperty(value, key);
// verify that the specified value is valid
if (initialCapacity <= 0) {
throw StandardException.newException(SQLState.LANG_INVALID_HASH_INITIAL_CAPACITY, String.valueOf(initialCapacity));
}
} else if (key.equals("hashLoadFactor")) {
try {
loadFactor = Float.parseFloat(value);
} catch (NumberFormatException nfe) {
throw StandardException.newException(SQLState.LANG_INVALID_NUMBER_FORMAT_FOR_OVERRIDE, value, key);
}
// verify that the specified value is valid
if (loadFactor <= 0.0 || loadFactor > 1.0) {
throw StandardException.newException(SQLState.LANG_INVALID_HASH_LOAD_FACTOR, value);
}
} else if (key.equals("hashMaxCapacity")) {
maxCapacity = getIntProperty(value, key);
// verify that the specified value is valid
if (maxCapacity <= 0) {
throw StandardException.newException(SQLState.LANG_INVALID_HASH_MAX_CAPACITY, String.valueOf(maxCapacity));
}
} else if (key.equals("bulkFetch")) {
bulkFetch = getIntProperty(value, key);
// verify that the specified value is valid
if (bulkFetch <= 0) {
throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_VALUE, String.valueOf(bulkFetch));
}
// no bulk fetch on updatable scans
if (forUpdate()) {
throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_UPDATEABLE);
}
} else if (key.equals("validateCheckConstraint")) {
// the property "validateCheckConstraint" is read earlier
// cf. isValidatingCheckConstraint
} else {
// No other "legal" values at this time
throw StandardException.newException(SQLState.LANG_INVALID_FROM_TABLE_PROPERTY, key, "index, constraint, joinStrategy");
}
}
/* If the user specified a non-null constraint name (DERBY-1707), then
* replace it in the properties list with the underlying index name to
* simplify the code in the optimizer.
* NOTE: The code to get from the constraint name, for a constraint
* with a backing index, to the index name is convoluted. Given
* the constraint name, we can get the conglomerate id from the
* ConstraintDescriptor. We then use the conglomerate id to get
* the ConglomerateDescriptor from the DataDictionary and, finally,
* we get the index name (conglomerate name) from the ConglomerateDescriptor.
*/
if (constraintSpecified && consDesc != null) {
ConglomerateDescriptor cd = dDictionary.getConglomerateDescriptor(consDesc.getConglomerateId());
String indexName = cd.getConglomerateName();
tableProperties.remove("constraint");
tableProperties.put("index", indexName);
}
}
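The property names checked above (index, constraint, joinStrategy, bulkFetch and the hash* settings) are Derby's table-level optimizer overrides, supplied in a --DERBY-PROPERTIES comment in the FROM clause. Below is a minimal JDBC sketch of how such an override reaches verifyProperties; the database name, table T, index T_IDX and column C1 are made-up examples, not part of the Derby source.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ForcedIndexExample {
    public static void main(String[] args) throws Exception {
        // Embedded Derby connection; assumes derby.jar is on the classpath.
        try (Connection conn =
                 DriverManager.getConnection("jdbc:derby:demoDB;create=true");
             Statement s = conn.createStatement()) {
            // The DERBY-PROPERTIES list runs to the end of the line, so the
            // rest of the statement continues after a newline. The key/value
            // pair ends up in tableProperties; an unknown key or a
            // non-existent index makes verifyProperties raise an error,
            // surfaced here as a SQLException.
            String sql = "SELECT * FROM T --DERBY-PROPERTIES index=T_IDX\n"
                       + "WHERE C1 = 42";
            try (ResultSet rs = s.executeQuery(sql)) {
                while (rs.next()) {
                    System.out.println(rs.getInt(1));
                }
            }
        }
    }
}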
Use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
The class FromBaseTable, method generateDistinctScan.
private void generateDistinctScan(ExpressionClassBuilder acb, MethodBuilder mb) throws StandardException {
ConglomerateDescriptor cd = getTrulyTheBestAccessPath().getConglomerateDescriptor();
CostEstimate costEst = getFinalCostEstimate();
int colRefItem = (referencedCols == null) ? -1 : acb.addItem(referencedCols);
boolean tableLockGranularity = tableDescriptor.getLockGranularity() == TableDescriptor.TABLE_LOCK_GRANULARITY;
/*
** getDistinctScanResultSet
** (
** activation,
** conglomerateNumber,
** scociItem,
** resultRowTemplate,
** resultSetNumber,
** hashKeyItem,
** tableName,
** userSuppliedOptimizerOverrides,
** indexName,
** isConstraint,
** colRefItem,
** lockMode,
** tableLocked,
** isolationLevel,
** optimizerEstimatedRowCount,
** optimizerEstimatedCost
** );
*/
/* Get the hash key columns and wrap them in a formattable */
int[] hashKeyCols = new int[getResultColumns().size()];
if (referencedCols == null) {
for (int index = 0; index < hashKeyCols.length; index++) {
hashKeyCols[index] = index;
}
} else {
int index = 0;
for (int colNum = referencedCols.anySetBit(); colNum != -1; colNum = referencedCols.anySetBit(colNum)) {
hashKeyCols[index++] = colNum;
}
}
FormatableIntHolder[] fihArray = FormatableIntHolder.getFormatableIntHolders(hashKeyCols);
FormatableArrayHolder hashKeyHolder = new FormatableArrayHolder(fihArray);
int hashKeyItem = acb.addItem(hashKeyHolder);
long conglomNumber = cd.getConglomerateNumber();
StaticCompiledOpenConglomInfo scoci = getLanguageConnectionContext().getTransactionCompile().getStaticCompiledConglomInfo(conglomNumber);
acb.pushGetResultSetFactoryExpression(mb);
acb.pushThisAsActivation(mb);
mb.push(conglomNumber);
mb.push(acb.addItem(scoci));
mb.push(acb.addItem(getResultColumns().buildRowTemplate(referencedCols, false)));
mb.push(getResultSetNumber());
mb.push(hashKeyItem);
mb.push(tableDescriptor.getName());
// Push the user-supplied optimizer overrides (tableProperties) as a string, shown in run time statistics; null if none were specified.
if (tableProperties != null)
mb.push(org.apache.derby.iapi.util.PropertyUtil.sortProperties(tableProperties));
else
mb.pushNull("java.lang.String");
pushIndexName(cd, mb);
mb.push(cd.isConstraint());
mb.push(colRefItem);
mb.push(getTrulyTheBestAccessPath().getLockMode());
mb.push(tableLockGranularity);
mb.push(getCompilerContext().getScanIsolationLevel());
mb.push(costEst.singleScanRowCount());
mb.push(costEst.getEstimatedCost());
mb.callMethod(VMOpcode.INVOKEINTERFACE, (String) null, "getDistinctScanResultSet", ClassName.NoPutResultSet, 16);
}
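The hash key computation above maps result columns to column positions: when referencedCols is null, every column 0..n-1 is a key; otherwise the set bits of the referenced-column bit set are enumerated in ascending order. A standalone sketch of the same mapping, using java.util.BitSet instead of Derby's FormatableBitSet purely for illustration:

import java.util.Arrays;
import java.util.BitSet;

public class HashKeyColumnsSketch {
    // Mirrors the loop in generateDistinctScan: with no referenced-column set,
    // every result column is a hash key; otherwise the keys are the positions
    // of the set bits, in ascending order (anySetBit() in the Derby code).
    static int[] hashKeyColumns(int resultColumnCount, BitSet referencedCols) {
        int[] hashKeyCols = new int[resultColumnCount];
        if (referencedCols == null) {
            for (int i = 0; i < hashKeyCols.length; i++) {
                hashKeyCols[i] = i;
            }
        } else {
            int index = 0;
            for (int col = referencedCols.nextSetBit(0); col >= 0;
                    col = referencedCols.nextSetBit(col + 1)) {
                hashKeyCols[index++] = col;
            }
        }
        return hashKeyCols;
    }

    public static void main(String[] args) {
        BitSet refs = new BitSet();
        refs.set(1);
        refs.set(3);
        // Two result columns remain after compaction, at base positions 1 and 3.
        System.out.println(Arrays.toString(hashKeyColumns(2, refs))); // [1, 3]
        System.out.println(Arrays.toString(hashKeyColumns(3, null))); // [0, 1, 2]
    }
}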
Use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
The class FromBaseTable, method changeAccessPath.
/**
* @see ResultSetNode#changeAccessPath
*
* @exception StandardException Thrown on error
*/
@Override
ResultSetNode changeAccessPath() throws StandardException {
ResultSetNode retval;
AccessPath ap = getTrulyTheBestAccessPath();
ConglomerateDescriptor trulyTheBestConglomerateDescriptor = ap.getConglomerateDescriptor();
JoinStrategy trulyTheBestJoinStrategy = ap.getJoinStrategy();
Optimizer opt = ap.getOptimizer();
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceChangingAccessPathForTable(tableNumber);
}
if (SanityManager.DEBUG) {
SanityManager.ASSERT(trulyTheBestConglomerateDescriptor != null, "Should only modify access path after conglomerate has been chosen.");
}
/*
** Make sure user-specified bulk fetch is OK with the chosen join
** strategy.
*/
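// At this point bulkFetch != UNSET only if the user supplied a bulkFetch
// property override (validated in verifyProperties above).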
if (bulkFetch != UNSET) {
if (!trulyTheBestJoinStrategy.bulkFetchOK()) {
throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_WITH_JOIN_TYPE, trulyTheBestJoinStrategy.getName());
} else if (trulyTheBestJoinStrategy.ignoreBulkFetch()) {
// bulkFetch has no meaning for hash join, just ignore it
disableBulkFetch();
} else if (isOneRowResultSet()) {
// bug 4431 - ignore bulkFetch property if it's a one-row result set
disableBulkFetch();
}
}
// bulkFetch = 1 is the same as no bulk fetch
if (bulkFetch == 1) {
disableBulkFetch();
}
/* Remove any redundant join clauses. A redundant join clause is one
* where there are other join clauses in the same equivalence class
* after it in the PredicateList.
*/
restrictionList.removeRedundantPredicates();
/*
** Divide up the predicates for different processing phases of the
** best join strategy.
*/
storeRestrictionList = new PredicateList(getContextManager());
nonStoreRestrictionList = new PredicateList(getContextManager());
requalificationRestrictionList = new PredicateList(getContextManager());
trulyTheBestJoinStrategy.divideUpPredicateLists(this, restrictionList, storeRestrictionList, nonStoreRestrictionList, requalificationRestrictionList, getDataDictionary());
/* Check to see if we are going to do execution-time probing
* of an index using IN-list values. We can tell by looking
* at the restriction list: if there is an IN-list probe
* predicate that is also a start/stop key then we know that
* we're going to do execution-time probing. In that case
* we disable bulk fetching to minimize the number of non-
* matching rows that we read from disk. RESOLVE: Do we
* really need to completely disable bulk fetching here,
* or can we do something else?
*/
for (Predicate pred : restrictionList) {
if (pred.isInListProbePredicate() && pred.isStartKey()) {
disableBulkFetch();
multiProbing = true;
break;
}
}
/*
** Consider turning on bulkFetch if it is turned
** off. Only turn it on if it is not an updatable
** scan and if it isn't a oneRowResultSet, and
** not a subquery, and it is OK to use bulk fetch
** with the chosen join strategy. NOTE: the subquery logic
** could be more sophisticated -- we are taking
** the safe route in avoiding reading extra
** data for something like:
**
** select x from t where x in (select y from t)
**
** In this case we want to stop the subquery
** evaluation as soon as something matches.
*/
if (trulyTheBestJoinStrategy.bulkFetchOK() && !(trulyTheBestJoinStrategy.ignoreBulkFetch()) && !bulkFetchTurnedOff && (bulkFetch == UNSET) && !forUpdate() && !isOneRowResultSet() && getLevel() == 0 && !validatingCheckConstraint) {
bulkFetch = getDefaultBulkFetch();
}
/* Statement is dependent on the chosen conglomerate. */
getCompilerContext().createDependency(trulyTheBestConglomerateDescriptor);
/* No need to modify access path if conglomerate is the heap */
if (!trulyTheBestConglomerateDescriptor.isIndex()) {
/*
** We need a little special logic for SYSSTATEMENTS
** here. SYSSTATEMENTS has a hidden column at the
** end. When someone does a select * we don't want
** to get that column from the store. So we'll always
** generate a partial read bitSet if we are scanning
** SYSSTATEMENTS to ensure we don't get the hidden
** column.
*/
boolean isSysstatements = tableName.equals("SYS", "SYSSTATEMENTS");
/* Template must reflect full row.
* Compact RCL down to partial row.
*/
templateColumns = getResultColumns();
referencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), isSysstatements, false);
setResultColumns(getResultColumns().compactColumns(isCursorTargetTable(), isSysstatements));
return this;
}
/* DERBY-1087: use the data page when returning an updatable result set */
if (ap.getCoveringIndexScan() && (!isCursorTargetTable())) {
/* Massage resultColumns so that it matches the index. */
setResultColumns(newResultColumns(getResultColumns(), trulyTheBestConglomerateDescriptor, baseConglomerateDescriptor, false));
/* We are going against the index. The template row must be the full index row.
* The template row will have the RID but the result row will not
* since there is no need to go to the data page.
*/
templateColumns = newResultColumns(getResultColumns(), trulyTheBestConglomerateDescriptor, baseConglomerateDescriptor, false);
templateColumns.addRCForRID();
// If this is for update then we need to get the RID in the result row
if (forUpdate()) {
getResultColumns().addRCForRID();
}
/* Compact RCL down to the partial row. We always want a new
* RCL and FormatableBitSet because this is a covering index. (This is
* because we don't want the RID in the partial row returned
* by the store.)
*/
referencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), true, false);
setResultColumns(getResultColumns().compactColumns(isCursorTargetTable(), true));
getResultColumns().setIndexRow(baseConglomerateDescriptor.getConglomerateNumber(), forUpdate());
return this;
}
/* Statement is dependent on the base conglomerate if this is
* a non-covering index.
*/
getCompilerContext().createDependency(baseConglomerateDescriptor);
/*
** On bulkFetch, we need to add the restrictions from
** the TableScan and reapply them here.
*/
if (bulkFetch != UNSET) {
restrictionList.copyPredicatesToOtherList(requalificationRestrictionList);
}
/*
** We know the chosen conglomerate is an index. We need to allocate
** an IndexToBaseRowNode above us, and to change the result column
** list for this FromBaseTable to reflect the columns in the index.
** We also need to shift "cursor target table" status from this
** FromBaseTable to the new IndexToBaseRowNode (because that's where
** a cursor can fetch the current row).
*/
ResultColumnList newResultColumns = newResultColumns(getResultColumns(), trulyTheBestConglomerateDescriptor, baseConglomerateDescriptor, true);
/* Compact the RCL for the IndexToBaseRowNode down to
* the partial row for the heap. The referenced BitSet
* will reflect only those columns coming from the heap.
* (ie, it won't reflect columns coming from the index.)
* NOTE: We need to re-get all of the columns from the heap
* when doing a bulk fetch because we will be requalifying
* the row in the IndexRowToBaseRow.
*/
// Get the BitSet for all of the referenced columns
FormatableBitSet indexReferencedCols = null;
FormatableBitSet heapReferencedCols;
if ((bulkFetch == UNSET) && (requalificationRestrictionList == null || requalificationRestrictionList.size() == 0)) {
/* No BULK FETCH or requalification, XOR off the columns coming from the heap
* to get the columns coming from the index.
*/
indexReferencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), true, false);
heapReferencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), true, true);
if (heapReferencedCols != null) {
indexReferencedCols.xor(heapReferencedCols);
}
} else {
// BULK FETCH or requalification - re-get all referenced columns from the heap
heapReferencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), true, false);
}
ResultColumnList heapRCL = getResultColumns().compactColumns(isCursorTargetTable(), false);
heapRCL.setIndexRow(baseConglomerateDescriptor.getConglomerateNumber(), forUpdate());
retval = new IndexToBaseRowNode(this, baseConglomerateDescriptor, heapRCL, isCursorTargetTable(), heapReferencedCols, indexReferencedCols, requalificationRestrictionList, forUpdate(), tableProperties, getContextManager());
/*
** The template row is all the columns. The
** result set is the compacted column list.
*/
setResultColumns(newResultColumns);
templateColumns = newResultColumns(getResultColumns(), trulyTheBestConglomerateDescriptor, baseConglomerateDescriptor, false);
/* Since we are doing a non-covered index scan, if bulkFetch is on, then
* the only columns that we need to get are those columns referenced in the start and stop positions
* and the qualifiers (and the RID) because we will need to re-get all of the other
* columns from the heap anyway.
* At this point in time, columns referenced anywhere in the column tree are
* marked as being referenced. So, we clear all of the references, walk the
* predicate list and remark the columns referenced from there and then add
* the RID before compacting the columns.
*/
if (bulkFetch != UNSET) {
getResultColumns().markAllUnreferenced();
storeRestrictionList.markReferencedColumns();
if (nonStoreRestrictionList != null) {
nonStoreRestrictionList.markReferencedColumns();
}
}
getResultColumns().addRCForRID();
templateColumns.addRCForRID();
// Compact the RCL for the index scan down to the partial row.
referencedCols = getResultColumns().getReferencedFormatableBitSet(isCursorTargetTable(), false, false);
setResultColumns(getResultColumns().compactColumns(isCursorTargetTable(), false));
getResultColumns().setIndexRow(baseConglomerateDescriptor.getConglomerateNumber(), forUpdate());
/* We must remember if this was the cursorTargetTable
* in order to get the right locking on the scan.
*/
getUpdateLocks = isCursorTargetTable();
setCursorTargetTable(false);
return retval;
}
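For a non-covering index scan without bulk fetch or requalification, the code above derives the index-only columns by taking all referenced columns and XOR-ing off the columns that must come from the heap (a set difference, since the heap set is a subset). A small illustration with java.util.BitSet; the column positions are invented:

import java.util.BitSet;

public class IndexHeapColumnSplit {
    public static void main(String[] args) {
        // All columns the query references (hypothetical positions).
        BitSet allReferenced = new BitSet();
        allReferenced.set(0);
        allReferenced.set(2);
        allReferenced.set(5);

        // Columns that still have to be fetched from the heap row.
        BitSet heapReferenced = new BitSet();
        heapReferenced.set(5);

        // Same idea as changeAccessPath: XOR off the heap columns to leave
        // the columns that can be returned straight from the index.
        BitSet indexReferenced = (BitSet) allReferenced.clone();
        indexReferenced.xor(heapReferenced);

        System.out.println("index columns: " + indexReferenced); // {0, 2}
        System.out.println("heap columns:  " + heapReferenced);  // {5}
    }
}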
Use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
The class FromBaseTable, method nextAccessPath.
/*
* Optimizable interface.
*/
/**
* @see org.apache.derby.iapi.sql.compile.Optimizable#nextAccessPath
*
* @exception StandardException Thrown on error
*/
@Override
public boolean nextAccessPath(Optimizer optimizer, OptimizablePredicateList predList, RowOrdering rowOrdering) throws StandardException {
String userSpecifiedIndexName = getUserSpecifiedIndexName();
AccessPath ap = getCurrentAccessPath();
ConglomerateDescriptor currentConglomerateDescriptor = ap.getConglomerateDescriptor();
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceNextAccessPath(getExposedName(), ((predList == null) ? 0 : predList.size()));
}
/*
** Remove the ordering of the current conglomerate descriptor,
** if any.
*/
rowOrdering.removeOptimizable(getTableNumber());
if (userSpecifiedIndexName != null) {
/*
** User specified an index name, so we should look at only one
** index. If there is a current conglomerate descriptor, and there
** are no more join strategies, we've already looked at the index,
** so go back to null.
*/
if (currentConglomerateDescriptor != null) {
if (!super.nextAccessPath(optimizer, predList, rowOrdering)) {
currentConglomerateDescriptor = null;
}
} else {
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceLookingForSpecifiedIndex(userSpecifiedIndexName, tableNumber);
}
if (StringUtil.SQLToUpperCase(userSpecifiedIndexName).equals("NULL")) {
/* Special case - user-specified table scan */
currentConglomerateDescriptor = tableDescriptor.getConglomerateDescriptor(tableDescriptor.getHeapConglomerateId());
} else {
/* User-specified index name */
getConglomDescs();
for (int index = 0; index < conglomDescs.length; index++) {
currentConglomerateDescriptor = conglomDescs[index];
String conglomerateName = currentConglomerateDescriptor.getConglomerateName();
if (conglomerateName != null) {
/* Have we found the desired index? */
if (conglomerateName.equals(userSpecifiedIndexName)) {
break;
}
}
}
/* We should always find a match */
if (SanityManager.DEBUG) {
if (currentConglomerateDescriptor == null) {
SanityManager.THROWASSERT("Expected to find match for forced index " + userSpecifiedIndexName);
}
}
}
if (!super.nextAccessPath(optimizer, predList, rowOrdering)) {
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("No join strategy found");
}
}
}
} else {
if (currentConglomerateDescriptor != null) {
/*
** Once we have a conglomerate descriptor, cycle through
** the join strategies (done in parent).
*/
if (!super.nextAccessPath(optimizer, predList, rowOrdering)) {
/*
** When we're out of join strategies, go to the next
** conglomerate descriptor.
*/
currentConglomerateDescriptor = getNextConglom(currentConglomerateDescriptor);
/*
** New conglomerate, so step through join strategies
** again.
*/
resetJoinStrategies(optimizer);
if (!super.nextAccessPath(optimizer, predList, rowOrdering)) {
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("No join strategy found");
}
}
}
} else {
/* Get the first conglomerate descriptor */
currentConglomerateDescriptor = getFirstConglom();
if (!super.nextAccessPath(optimizer, predList, rowOrdering)) {
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("No join strategy found");
}
}
}
}
if (currentConglomerateDescriptor == null) {
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceNoMoreConglomerates(tableNumber);
}
} else {
currentConglomerateDescriptor.setColumnNames(columnNames);
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceConsideringConglomerate(currentConglomerateDescriptor, tableNumber);
}
}
/*
** Tell the rowOrdering what the ordering of this conglomerate is.
*/
if (currentConglomerateDescriptor != null) {
if (!currentConglomerateDescriptor.isIndex()) {
/* If we are scanning the heap, but there
* is a full match on a unique key, then
* we can say that the table IS NOT unordered.
* (We can't currently say what the ordering is
* though.)
*/
if (!isOneRowResultSet(predList)) {
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceAddingUnorderedOptimizable(((predList == null) ? 0 : predList.size()));
}
rowOrdering.addUnorderedOptimizable(this);
} else {
if (optimizerTracingIsOn()) {
getOptimizerTracer().traceScanningHeapWithUniqueKey();
}
}
} else {
IndexRowGenerator irg = currentConglomerateDescriptor.getIndexDescriptor();
int[] baseColumnPositions = irg.baseColumnPositions();
boolean[] isAscending = irg.isAscending();
for (int i = 0; i < baseColumnPositions.length; i++) {
/*
** Don't add the column to the ordering if it's already
** an ordered column. This can happen in the following
** case:
**
** create index ti on t(x, y);
** select * from t where x = 1 order by y;
**
** Column x is always ordered, so we want to avoid the
** sort when using index ti. This is accomplished by
** making column y appear as the first ordered column
** in the list.
*/
if (!rowOrdering.orderedOnColumn(isAscending[i] ? RowOrdering.ASCENDING : RowOrdering.DESCENDING, getTableNumber(), baseColumnPositions[i])) {
rowOrdering.nextOrderPosition(isAscending[i] ? RowOrdering.ASCENDING : RowOrdering.DESCENDING);
rowOrdering.addOrderedColumn(isAscending[i] ? RowOrdering.ASCENDING : RowOrdering.DESCENDING, getTableNumber(), baseColumnPositions[i]);
}
}
}
}
ap.setConglomerateDescriptor(currentConglomerateDescriptor);
return currentConglomerateDescriptor != null;
}
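nextAccessPath effectively enumerates the cross product of conglomerates and join strategies: for a fixed conglomerate the parent class cycles through join strategies, and when those are exhausted the next conglomerate is fetched and the strategies are reset. The sketch below shows only that iteration shape; AccessPathEnumerator and its next() method are invented for illustration and are not Derby APIs (the real logic lives in nextAccessPath, getNextConglom and resetJoinStrategies).

import java.util.Iterator;
import java.util.List;

class AccessPathEnumerator {
    private final Iterator<String> conglomIter;     // stand-ins for ConglomerateDescriptors
    private final List<String> joinStrategies;      // stand-ins for JoinStrategy objects
    private Iterator<String> strategyIter;
    private String currentConglomerate;

    AccessPathEnumerator(List<String> conglomerates, List<String> joinStrategies) {
        this.conglomIter = conglomerates.iterator();
        this.joinStrategies = joinStrategies;
        this.strategyIter = joinStrategies.iterator();
    }

    /** Returns the next (conglomerate, joinStrategy) pair, or null when exhausted. */
    String[] next() {
        if (currentConglomerate == null || !strategyIter.hasNext()) {
            if (!conglomIter.hasNext()) {
                return null;                           // no more conglomerates: done
            }
            currentConglomerate = conglomIter.next();  // next conglomerate
            strategyIter = joinStrategies.iterator();  // resetJoinStrategies equivalent
        }
        return new String[] { currentConglomerate, strategyIter.next() };
    }

    public static void main(String[] args) {
        AccessPathEnumerator e = new AccessPathEnumerator(
                List.of("HEAP", "IDX_A"), List.of("NESTEDLOOP", "HASH"));
        for (String[] ap = e.next(); ap != null; ap = e.next()) {
            System.out.println(ap[0] + " / " + ap[1]);
        }
    }
}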
Use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
The class DMLModStatementNode, method markAffectedIndexes.
protected void markAffectedIndexes(List<ConglomerateDescriptor> affectedConglomerates) throws StandardException {
ConglomerateDescriptor cd;
int indexCount = affectedConglomerates.size();
CompilerContext cc = getCompilerContext();
indicesToMaintain = new IndexRowGenerator[indexCount];
indexConglomerateNumbers = new long[indexCount];
indexNames = new String[indexCount];
for (int ictr = 0; ictr < indexCount; ictr++) {
cd = affectedConglomerates.get(ictr);
indicesToMaintain[ictr] = cd.getIndexDescriptor();
indexConglomerateNumbers[ictr] = cd.getConglomerateNumber();
indexNames[ictr] = ((cd.isConstraint()) ? null : cd.getConglomerateName());
cc.createDependency(cd);
}
}