use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class PredicateList method selectivity.
/**
* @see OptimizablePredicateList#selectivity
*/
public double selectivity(Optimizable optTable) throws StandardException {
TableDescriptor td = optTable.getTableDescriptor();
ConglomerateDescriptor[] conglomerates = td.getConglomerateDescriptors();
int numPredicates = size();
int numConglomerates = conglomerates.length;
if (numConglomerates == 1)
// one conglomerate; must be heap.
return -1.0d;
if (numPredicates == 0)
// no predicates; why bother?
return -1.0d;
boolean nothingYet = true;
/* before we start, let's select non-redundant predicates into a working
* list; we'll work with the workingPredicates list from now on in this
* routine.
*/
PredicateList workingPredicates = new PredicateList(getContextManager());
for (int i = 0; i < numPredicates; i++) {
if (isRedundantPredicate(i))
continue;
/* to workingPredicates only add useful predicates... */
workingPredicates.addOptPredicate(elementAt(i));
}
int numWorkingPredicates = workingPredicates.size();
/*--------------------------------------------------------------------
* In the first phase, the routine initializes an array of
* predicateWrapperLists-- one list for each conglomerate that has
* statistics.
*
* predsForConglomerates is an array of pwList. For each conglomerate we
* keep a pwList of predicates that have an equals predicate on a column
* in the conglomerate.
*
* As an example consider a table T, with indices on
* T(c1)-->conglom_one, T(c2,c1)-->conglom_two.
*
* if we have the following predicates:
* T.c1=T1.x (p1) and T.c1=T2.y (p2) and T.c2=T1.z (p3), then,
* after the first loop is done, we'll have the following setup.
*
* conglom_one: pwList [p1,p2]
* conglom_two: pwList [p1,p2,p3].
*
* Note that although p1,p2 appear on both conglomerates, the
* indexPosition of p1 and p2 on the first list is 0 (first index
* position) while the index position of p1,p2 on the second list is 1
* (second index position).
*
* PredicateWrapper and PredicateWrapperLists are inner classes used
* only in this file.
* -------------------------------------------------------------------- */
PredicateWrapperList[] predsForConglomerates = new PredicateWrapperList[numConglomerates];
for (int i = 0; i < numConglomerates; i++) {
ConglomerateDescriptor cd = conglomerates[i];
if (!cd.isIndex())
continue;
if (!td.statisticsExist(cd))
continue;
int[] baseColumnList = cd.getIndexDescriptor().baseColumnPositions();
for (int j = 0; j < numWorkingPredicates; j++) {
Predicate pred = workingPredicates.elementAt(j);
int ip = pred.hasEqualOnColumnList(baseColumnList, optTable);
if (ip < 0)
// look at the next predicate.
continue;
nothingYet = false;
if (predsForConglomerates[i] == null) {
predsForConglomerates[i] = new PredicateWrapperList(numWorkingPredicates);
}
PredicateWrapper newpw = new PredicateWrapper(ip, pred, j);
predsForConglomerates[i].insert(newpw);
} // for (j = 0; ...)
}
if (nothingYet) {
return -1.0;
}
/*------------------------------------------------------------------
* In the second phase, we walk the predsForConglomerates array
* again -- if we find a break in the indexPositions, we remove the
* predicates after the gap. To clarify, if we have equal predicates
* on the first and the third index positions, we can throw away the
* predicate on the third index position -- it doesn't do us any good.
*-------------------------------------------------------------------*/
int maxOverlap = -1;
for (int i = 0; i < numConglomerates; i++) {
if (predsForConglomerates[i] == null)
continue;
predsForConglomerates[i].retainLeadingContiguous();
} // for (i = 0; i < ...)
calculateWeight(predsForConglomerates, numWorkingPredicates);
/*-------------------------------------------------------------------
* In the third phase we loop through predsForConglomerates choosing the
* best fit (chooseLongestMatch) of predicates. We use the statistics
* for the set of predicates returned by chooseLongestMatch and then
* loop until we can't find any more statistics or we have exhausted all
* the predicates for which we are trying to find statistics.
*--------------------------------------------------------------------*/
double selectivity = 1.0;
ArrayList<Predicate> maxPreds = new ArrayList<Predicate>();
while (true) {
maxPreds.clear();
int conglomIndex = chooseLongestMatch(predsForConglomerates, maxPreds, numWorkingPredicates);
if (conglomIndex == -1)
// no more stats available.
break;
selectivity *= td.selectivityForConglomerate(conglomerates[conglomIndex], maxPreds.size());
for (int i = 0; i < maxPreds.size(); i++) {
/* remove the predicates that we've calculated the selectivity
* of, from workingPredicates.
*/
Predicate p = maxPreds.get(i);
workingPredicates.removeOptPredicate(p);
}
if (workingPredicates.size() == 0)
break;
}
if (workingPredicates.size() != 0) {
selectivity *= workingPredicates.selectivityNoStatistics(optTable);
}
return selectivity;
}
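To make the phase-2 pruning concrete, here is a minimal standalone sketch of the retain-leading-contiguous idea. Wrapper, retainLeadingContiguous, and the sample predicates are illustrative stand-ins, not Derby's package-private PredicateWrapper/PredicateWrapperList classes, and the sketch assumes the wrappers arrive sorted by index position.

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for Derby's PredicateWrapper: it pairs a
// predicate with its key position within one particular index.
class Wrapper {
    final int indexPosition; // 0-based key position within the index
    final String predicate;  // placeholder for the real Predicate node

    Wrapper(int indexPosition, String predicate) {
        this.indexPosition = indexPosition;
        this.predicate = predicate;
    }
}

public class LeadingContiguousDemo {
    // Keep only wrappers whose index positions form an unbroken run
    // starting at position 0; a gap makes every later position useless
    // for statistics, as the phase-2 comment explains.
    static List<Wrapper> retainLeadingContiguous(List<Wrapper> in) {
        List<Wrapper> out = new ArrayList<>();
        int expected = 0;
        for (Wrapper w : in) {
            if (w.indexPosition > expected) {
                break; // gap found; drop everything from here on
            }
            if (w.indexPosition == expected) {
                expected++;
            }
            out.add(w); // duplicates of an already-covered position stay
        }
        return out;
    }

    public static void main(String[] args) {
        List<Wrapper> preds = new ArrayList<>();
        preds.add(new Wrapper(0, "T.c1 = T1.x"));
        preds.add(new Wrapper(0, "T.c1 = T2.y")); // same position is fine
        preds.add(new Wrapper(2, "T.c3 = T1.z")); // position 1 missing
        for (Wrapper w : retainLeadingContiguous(preds)) {
            System.out.println(w.predicate);
        }
    }
}

Running it prints only the two position-0 predicates; the predicate on position 2 is dropped because position 1 never appears.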
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class UnaryComparisonOperatorNode method getAbsoluteColumnPosition.
/**
* Get the absolute 0-based column position of the ColumnReference from
* the conglomerate for this Optimizable.
*
* @param optTable The Optimizable
*
* @return The absolute 0-based column position of the ColumnReference
*/
private int getAbsoluteColumnPosition(Optimizable optTable) {
ColumnReference cr = (ColumnReference) operand;
int columnPosition;
ConglomerateDescriptor bestCD;
/* Column positions are one-based, store is zero-based */
columnPosition = cr.getSource().getColumnPosition();
bestCD = optTable.getTrulyTheBestAccessPath().getConglomerateDescriptor();
/*
** If it's an index, find the base column position in the index
** and translate it to an index column position.
*/
if (bestCD.isIndex()) {
columnPosition = bestCD.getIndexDescriptor().getKeyColumnPosition(columnPosition);
if (SanityManager.DEBUG) {
SanityManager.ASSERT(columnPosition > 0, "Base column not found in index");
}
}
// return the 0-based column position
return columnPosition - 1;
}
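Three numbering schemes meet in this method: 1-based base-table positions, 1-based index key positions, and the store's 0-based positions. Below is a small self-contained sketch of the translation, where getKeyColumnPosition is a hypothetical re-implementation that assumes baseColumnPositions() lists 1-based base-column positions in key order and that a return value of 0 means "not a key column" (matching the assertion above).

public class ColumnPositionDemo {

    // Hypothetical lookup: returns the 1-based position of baseColumn
    // within the index key, or 0 if it is not a key column.
    static int getKeyColumnPosition(int[] baseColumnPositions, int baseColumn) {
        for (int i = 0; i < baseColumnPositions.length; i++) {
            if (baseColumnPositions[i] == baseColumn) {
                return i + 1;
            }
        }
        return 0;
    }

    public static void main(String[] args) {
        // Index on (c2, c1) of a table whose columns are c1=1, c2=2, c3=3.
        int[] baseColumnPositions = { 2, 1 };
        int columnPosition = 1; // the ColumnReference resolves to base column c1
        columnPosition = getKeyColumnPosition(baseColumnPositions, columnPosition);
        // c1 is the second key column: 1-based key position 2,
        // so the 0-based position handed to the store is 1.
        System.out.println(columnPosition - 1); // prints 1
    }
}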
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class UpdateNode method getReadMap.
/**
* Gets the map of all columns which must be read out of the base table.
* These are the columns needed to:<UL>
* <LI>maintain indices</LI>
* <LI>maintain foreign keys</LI>
* <LI>maintain generated columns</LI>
* <LI>support Replication's Delta Optimization</LI></UL>
* <p>
* The returned map is a FormatableBitSet with 1 bit for each column in the
* table plus an extra, unused 0-bit. If a 1-based column id must
* be read from the base table, then the corresponding 1-based bit
* is turned ON in the returned FormatableBitSet.
* <p>
* <B>NOTE</B>: this method is not expected to be called when
* all columns are being updated (i.e. updateColumnList is null).
*
* @param dd the data dictionary to look in
* @param baseTable the base table descriptor
* @param updateColumnList the rcl for the update. CANNOT BE NULL
* @param affectedGeneratedColumns columns whose generation clauses mention columns being updated
*
* @return a FormatableBitSet of columns to be read out of the base table
*
* @exception StandardException Thrown on error
*/
FormatableBitSet getReadMap(DataDictionary dd, TableDescriptor baseTable, ResultColumnList updateColumnList, ColumnDescriptorList affectedGeneratedColumns) throws StandardException {
boolean[] needsDeferredProcessing = new boolean[1];
needsDeferredProcessing[0] = requiresDeferredProcessing();
ArrayList<ConglomerateDescriptor> conglomerates = new ArrayList<ConglomerateDescriptor>();
relevantCdl = new ConstraintDescriptorList();
relevantTriggers = new TriggerDescriptorList();
FormatableBitSet columnMap = getUpdateReadMap(dd, baseTable, updateColumnList, conglomerates, relevantCdl, relevantTriggers, needsDeferredProcessing, affectedGeneratedColumns);
markAffectedIndexes(conglomerates);
adjustDeferredFlag(needsDeferredProcessing[0]);
return columnMap;
}
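The bit-numbering convention described in the javadoc -- one bit per column plus an unused 0-bit, with the 1-based column id mapping straight to the 1-based bit -- can be illustrated with plain java.util.BitSet standing in for Derby's FormatableBitSet; the column numbers below are invented.

import java.util.BitSet;

public class ReadMapDemo {
    public static void main(String[] args) {
        int numColumns = 5;
        // One bit per column plus the unused 0-bit, so size is n + 1.
        BitSet readMap = new BitSet(numColumns + 1);
        // Column ids are 1-based, so setting bit 3 means "read column 3".
        readMap.set(3); // e.g. a column referenced by an index
        readMap.set(5); // e.g. a column referenced by a foreign key
        System.out.println(readMap.get(3)); // true
        System.out.println(readMap.get(0)); // false: bit 0 stays unused
    }
}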
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class SYSCONSTRAINTSRowFactory method buildDescriptor.
// /////////////////////////////////////////////////////////////////////////
//
// ABSTRACT METHODS TO BE IMPLEMENTED BY CHILDREN OF CatalogRowFactory
//
// /////////////////////////////////////////////////////////////////////////
/**
* Make a ConstraintDescriptor out of a SYSCONSTRAINTS row
*
* @param row a SYSCONSTRAINTS row
* @param parentTupleDescriptor Subconstraint descriptor with auxiliary info.
* @param dd dataDictionary
*
* @exception StandardException thrown on failure
*/
public TupleDescriptor buildDescriptor(ExecRow row, TupleDescriptor parentTupleDescriptor, DataDictionary dd) throws StandardException {
ConstraintDescriptor constraintDesc = null;
if (SanityManager.DEBUG) {
SanityManager.ASSERT(row.nColumns() == SYSCONSTRAINTS_COLUMN_COUNT, "Wrong number of columns for a SYSCONSTRAINTS row");
}
DataValueDescriptor col;
ConglomerateDescriptor conglomDesc;
DataDescriptorGenerator ddg;
TableDescriptor td = null;
int constraintIType = -1;
int[] keyColumns = null;
UUID constraintUUID;
UUID schemaUUID;
UUID tableUUID;
UUID referencedConstraintId = null;
SchemaDescriptor schema;
String tableUUIDString;
String constraintName;
String constraintSType;
String constraintStateStr;
boolean deferrable = ConstraintDefinitionNode.DEFERRABLE_DEFAULT;
boolean initiallyDeferred = ConstraintDefinitionNode.INITIALLY_DEFERRED_DEFAULT;
boolean enforced = ConstraintDefinitionNode.ENFORCED_DEFAULT;
int referenceCount;
String constraintUUIDString;
String schemaUUIDString;
SubConstraintDescriptor scd;
if (SanityManager.DEBUG) {
if (!(parentTupleDescriptor instanceof SubConstraintDescriptor)) {
SanityManager.THROWASSERT("parentTupleDescriptor expected to be instanceof " + "SubConstraintDescriptor, not " + parentTupleDescriptor.getClass().getName());
}
}
scd = (SubConstraintDescriptor) parentTupleDescriptor;
ddg = dd.getDataDescriptorGenerator();
/* 1st column is CONSTRAINTID (UUID - char(36)) */
col = row.getColumn(SYSCONSTRAINTS_CONSTRAINTID);
constraintUUIDString = col.getString();
constraintUUID = getUUIDFactory().recreateUUID(constraintUUIDString);
/* 2nd column is TABLEID (UUID - char(36)) */
col = row.getColumn(SYSCONSTRAINTS_TABLEID);
tableUUIDString = col.getString();
tableUUID = getUUIDFactory().recreateUUID(tableUUIDString);
/* Get the TableDescriptor.
* It may be cached in the SCD,
* otherwise we need to go to the
* DD.
*/
if (scd != null) {
td = scd.getTableDescriptor();
}
if (td == null) {
td = dd.getTableDescriptor(tableUUID);
}
/* 3rd column is NAME (varchar(128)) */
col = row.getColumn(SYSCONSTRAINTS_CONSTRAINTNAME);
constraintName = col.getString();
/* 4th column is TYPE (char(1)) */
col = row.getColumn(SYSCONSTRAINTS_TYPE);
constraintSType = col.getString();
if (SanityManager.DEBUG) {
SanityManager.ASSERT(constraintSType.length() == 1, "Fourth column type incorrect");
}
boolean typeSet = false;
switch(constraintSType.charAt(0)) {
case 'P':
constraintIType = DataDictionary.PRIMARYKEY_CONSTRAINT;
typeSet = true;
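// Intentional fall-through: 'P', 'U' and 'F' all continue into the
// backing-index logic below, since primary key, unique and foreign
// key constraints are each backed by an index.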
case 'U':
if (!typeSet) {
constraintIType = DataDictionary.UNIQUE_CONSTRAINT;
typeSet = true;
}
case 'F':
if (!typeSet)
constraintIType = DataDictionary.FOREIGNKEY_CONSTRAINT;
if (SanityManager.DEBUG) {
if (!(parentTupleDescriptor instanceof SubKeyConstraintDescriptor)) {
SanityManager.THROWASSERT("parentTupleDescriptor expected to be instanceof " + "SubKeyConstraintDescriptor, not " + parentTupleDescriptor.getClass().getName());
}
}
conglomDesc = td.getConglomerateDescriptor(((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId());
/* Take care of the rare case of conglomDesc being null. The
* reason is that our "td" is out of date. Another thread
* which was adding a constraint committed between the moment
* we got the table descriptor (conglomerate list) and the
* moment we scanned and got the constraint desc list; that
* thread had just added a new row to SYSCONGLOMERATES,
* SYSCONSTRAINTS, etc. We wouldn't have wanted to lock the
* system tables just to prevent other threads from adding new
* rows.
*/
if (conglomDesc == null) {
// we can't be getting td from cache because if we are
// here, we must have been in dd's ddl mode (that's why
// the ddl thread went through), we are not done yet, the
// dd ref count is not 0, hence it couldn't have turned
// into COMPILE_ONLY mode
td = dd.getTableDescriptor(tableUUID);
if (scd != null)
scd.setTableDescriptor(td);
// try again now
conglomDesc = td.getConglomerateDescriptor(((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId());
}
if (SanityManager.DEBUG) {
SanityManager.ASSERT(conglomDesc != null, "conglomDesc is expected to be non-null for backing index");
}
keyColumns = conglomDesc.getIndexDescriptor().baseColumnPositions();
referencedConstraintId = ((SubKeyConstraintDescriptor) parentTupleDescriptor).getKeyConstraintId();
break;
case 'C':
constraintIType = DataDictionary.CHECK_CONSTRAINT;
if (SanityManager.DEBUG) {
if (!(parentTupleDescriptor instanceof SubCheckConstraintDescriptor)) {
SanityManager.THROWASSERT("parentTupleDescriptor expected to be instanceof " + "SubCheckConstraintDescriptor, not " + parentTupleDescriptor.getClass().getName());
}
}
break;
default:
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("Fourth column value invalid");
}
}
/* 5th column is SCHEMAID (UUID - char(36)) */
col = row.getColumn(SYSCONSTRAINTS_SCHEMAID);
schemaUUIDString = col.getString();
schemaUUID = getUUIDFactory().recreateUUID(schemaUUIDString);
schema = dd.getSchemaDescriptor(schemaUUID, null);
/* 6th column is STATE (char(1)) */
col = row.getColumn(SYSCONSTRAINTS_STATE);
constraintStateStr = col.getString();
if (SanityManager.DEBUG) {
SanityManager.ASSERT(constraintStateStr.length() == 1, "Sixth column (state) type incorrect");
}
switch(constraintStateStr.charAt(0)) {
case 'E':
deferrable = false;
initiallyDeferred = false;
enforced = true;
break;
case 'D':
deferrable = false;
initiallyDeferred = false;
enforced = false;
break;
case 'e':
deferrable = true;
initiallyDeferred = true;
enforced = true;
break;
case 'd':
deferrable = true;
initiallyDeferred = true;
enforced = false;
break;
case 'i':
deferrable = true;
initiallyDeferred = false;
enforced = true;
break;
case 'j':
deferrable = true;
initiallyDeferred = false;
enforced = false;
break;
default:
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("Invalidate state value '" + constraintStateStr + "' for constraint");
}
}
/* 7th column is REFERENCECOUNT, int */
col = row.getColumn(SYSCONSTRAINTS_REFERENCECOUNT);
referenceCount = col.getInt();
switch(constraintIType) {
case DataDictionary.PRIMARYKEY_CONSTRAINT:
constraintDesc = ddg.newPrimaryKeyConstraintDescriptor(td, constraintName, deferrable, initiallyDeferred, // genReferencedColumns(dd, td), //int referencedColumns[],
keyColumns, constraintUUID, ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(), schema, enforced, referenceCount);
break;
case DataDictionary.UNIQUE_CONSTRAINT:
constraintDesc = ddg.newUniqueConstraintDescriptor(td, constraintName, deferrable, initiallyDeferred, // genReferencedColumns(dd, td), //int referencedColumns[],
keyColumns, constraintUUID, ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(), schema, enforced, referenceCount);
break;
case DataDictionary.FOREIGNKEY_CONSTRAINT:
if (SanityManager.DEBUG) {
SanityManager.ASSERT(referenceCount == 0, "REFERENCECOUNT column is nonzero for fk constraint");
}
constraintDesc = ddg.newForeignKeyConstraintDescriptor(td, constraintName, deferrable, initiallyDeferred, // genReferencedColumns(dd, td), //int referencedColumns[],
keyColumns, constraintUUID, ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(), schema, referencedConstraintId, enforced, ((SubKeyConstraintDescriptor) parentTupleDescriptor).getRaDeleteRule(), ((SubKeyConstraintDescriptor) parentTupleDescriptor).getRaUpdateRule());
break;
case DataDictionary.CHECK_CONSTRAINT:
if (SanityManager.DEBUG) {
SanityManager.ASSERT(referenceCount == 0, "REFERENCECOUNT column is nonzero for check constraint");
}
constraintDesc = ddg.newCheckConstraintDescriptor(td, constraintName, deferrable, initiallyDeferred, constraintUUID, ((SubCheckConstraintDescriptor) parentTupleDescriptor).getConstraintText(), ((SubCheckConstraintDescriptor) parentTupleDescriptor).getReferencedColumnsDescriptor(), schema, enforced);
break;
}
return constraintDesc;
}
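The STATE decoding above is easiest to read as a table from state character to the (deferrable, initiallyDeferred, enforced) triple. Here is a compact sketch restating it; decode and ConstraintStateDemo are hypothetical names, and only the six state characters and their flag combinations come from the source.

public class ConstraintStateDemo {
    // {deferrable, initiallyDeferred, enforced} per state character
    static boolean[] decode(char state) {
        switch (state) {
            case 'E': return new boolean[] { false, false, true  };
            case 'D': return new boolean[] { false, false, false };
            case 'e': return new boolean[] { true,  true,  true  };
            case 'd': return new boolean[] { true,  true,  false };
            case 'i': return new boolean[] { true,  false, true  };
            case 'j': return new boolean[] { true,  false, false };
            default:
                throw new IllegalArgumentException(
                        "Invalid state value '" + state + "'");
        }
    }

    public static void main(String[] args) {
        boolean[] flags = decode('i');
        System.out.println("deferrable=" + flags[0]
                + " initiallyDeferred=" + flags[1]
                + " enforced=" + flags[2]); // deferrable, not deferred, enforced
    }
}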
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class AlterTableNode method bindStatement.
// We inherit the generate() method from DDLStatementNode.
/**
* Bind this AlterTableNode. This means doing any static error
* checking that can be done before actually creating the table.
* For example, verifying that the user is not trying to add a
* non-nullable column.
*
* @exception StandardException Thrown on error
*/
@Override
public void bindStatement() throws StandardException {
DataDictionary dd = getDataDictionary();
int numCheckConstraints = 0;
int numReferenceConstraints = 0;
int numGenerationClauses = 0;
int numBackingIndexes = 0;
/*
** Get the table descriptor. Checks the schema
** and the table.
*/
if (compressTable && (purge || defragment || truncateEndOfTable)) {
// We are dealing with inplace compress here and inplace compress is
// allowed on system schemas. In order to support inplace compress
// on user as well as system tables, we need to use special
// getTableDescriptor(boolean) call to get TableDescriptor. This
// getTableDescriptor(boolean) allows getting TableDescriptor for
// system tables without throwing an exception.
baseTable = getTableDescriptor(false);
} else
baseTable = getTableDescriptor();
// throw an exception if user is attempting to alter a temporary table
if (baseTable.getTableType() == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
throw StandardException.newException(SQLState.LANG_NOT_ALLOWED_FOR_DECLARED_GLOBAL_TEMP_TABLE);
}
/* Statement is dependent on the TableDescriptor */
getCompilerContext().createDependency(baseTable);
// For any string column added here, the collation type comes from the
// schema descriptor, and the collation derivation of such a column would be "implicit".
if (changeType == ADD_TYPE) {
// the action is of type add.
if (tableElementList != null) {
// check if it is an add-column action
for (int i = 0; i < tableElementList.size(); i++) {
if (tableElementList.elementAt(i) instanceof ColumnDefinitionNode) {
ColumnDefinitionNode cdn = (ColumnDefinitionNode) tableElementList.elementAt(i);
if (cdn.hasGenerationClause() && (cdn.getType() == null)) {
continue;
}
if (cdn.getType() == null) {
throw StandardException.newException(SQLState.LANG_NEEDS_DATATYPE, cdn.getColumnName());
}
if (cdn.getType().getTypeId().isStringTypeId()) {
// we found what we are looking for. Set the
// collation type of this column to be the same as
// schema descriptor's collation. Set the collation
// derivation as implicit
cdn.setCollationType(schemaDescriptor.getCollationType());
}
}
}
}
}
if (tableElementList != null) {
tableElementList.validate(this, dd, baseTable);
/* Only 1012 columns allowed per table */
if ((tableElementList.countNumberOfColumns() + baseTable.getNumberOfColumns()) > Limits.DB2_MAX_COLUMNS_IN_TABLE) {
throw StandardException.newException(SQLState.LANG_TOO_MANY_COLUMNS_IN_TABLE_OR_VIEW, String.valueOf(tableElementList.countNumberOfColumns() + baseTable.getNumberOfColumns()), getRelativeName(), String.valueOf(Limits.DB2_MAX_COLUMNS_IN_TABLE));
}
/* Number of backing indexes in the alter table statement */
numBackingIndexes = tableElementList.countConstraints(DataDictionary.PRIMARYKEY_CONSTRAINT) + tableElementList.countConstraints(DataDictionary.FOREIGNKEY_CONSTRAINT) + tableElementList.countConstraints(DataDictionary.UNIQUE_CONSTRAINT);
/* Check the validity of all check constraints */
numCheckConstraints = tableElementList.countConstraints(DataDictionary.CHECK_CONSTRAINT);
numReferenceConstraints = tableElementList.countConstraints(DataDictionary.FOREIGNKEY_CONSTRAINT);
numGenerationClauses = tableElementList.countGenerationClauses();
}
// If the number of indexes on the table so far is more than 32767, then we need to throw an exception
if ((numBackingIndexes + baseTable.getTotalNumberOfIndexes()) > Limits.DB2_MAX_INDEXES_ON_TABLE) {
throw StandardException.newException(SQLState.LANG_TOO_MANY_INDEXES_ON_TABLE, String.valueOf(numBackingIndexes + baseTable.getTotalNumberOfIndexes()), getRelativeName(), String.valueOf(Limits.DB2_MAX_INDEXES_ON_TABLE));
}
if ((numCheckConstraints > 0) || (numGenerationClauses > 0) || (numReferenceConstraints > 0)) {
/* In order to check the validity of the check constraints and
* generation clauses
* we must goober up a FromList containing a single table,
* the table being altered, with an RCL containing the existing and
* new columns and their types. This will allow us to
* bind the constraint definition trees against that
* FromList. When doing this, we verify that there are
* no nodes which can return non-deterministic results.
*/
FromList fromList = makeFromList(dd, tableElementList, false);
FormatableBitSet generatedColumns = baseTable.makeColumnMap(baseTable.getGeneratedColumns());
/* Now that we've finally goobered stuff up, bind and validate
* the check constraints and generation clauses.
*/
if (numGenerationClauses > 0) {
tableElementList.bindAndValidateGenerationClauses(schemaDescriptor, fromList, generatedColumns, baseTable);
}
if (numCheckConstraints > 0) {
tableElementList.bindAndValidateCheckConstraints(fromList);
}
if (numReferenceConstraints > 0) {
tableElementList.validateForeignKeysOnGenerationClauses(fromList, generatedColumns);
}
}
// must be done after resolving the datatypes of the generation clauses
if (tableElementList != null) {
tableElementList.validatePrimaryKeyNullability();
}
// Check whether the user is updating/dropping statistics of a specific index. If yes, then verify that the index name provided is a valid one.
if ((updateStatistics && !updateStatisticsAll) || (dropStatistics && !dropStatisticsAll)) {
ConglomerateDescriptor cd = null;
if (schemaDescriptor.getUUID() != null)
cd = dd.getConglomerateDescriptor(indexNameForStatistics, schemaDescriptor, false);
if (cd == null) {
throw StandardException.newException(SQLState.LANG_INDEX_NOT_FOUND, schemaDescriptor.getSchemaName() + "." + indexNameForStatistics);
}
}
/* Unlike most other DDL, we will make this ALTER TABLE statement
* dependent on the table being altered. In general, we try to
* avoid this for DDL, but we are already requiring the table to
* exist at bind time (not required for create index) and we don't
* want the column ids to change out from under us before
* execution.
*/
getCompilerContext().createDependency(baseTable);
}
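Both limit checks in bindStatement follow the same shape: add what the statement would create to what the table already has, then compare against a DB2 compatibility limit. Below is a hedged sketch with the limits inlined (1012 columns and 32767 indexes, per Limits.DB2_MAX_COLUMNS_IN_TABLE and Limits.DB2_MAX_INDEXES_ON_TABLE) and a plain IllegalStateException standing in for StandardException.

public class AlterTableLimitsDemo {
    static final int MAX_COLUMNS_IN_TABLE = 1012;
    static final int MAX_INDEXES_ON_TABLE = 32767;

    static void checkLimits(int existingColumns, int addedColumns,
                            int existingIndexes, int addedBackingIndexes) {
        if (existingColumns + addedColumns > MAX_COLUMNS_IN_TABLE) {
            throw new IllegalStateException("too many columns: "
                    + (existingColumns + addedColumns));
        }
        // Each PRIMARY KEY, UNIQUE, and FOREIGN KEY constraint counts,
        // since each one is backed by an index.
        if (existingIndexes + addedBackingIndexes > MAX_INDEXES_ON_TABLE) {
            throw new IllegalStateException("too many indexes: "
                    + (existingIndexes + addedBackingIndexes));
        }
    }

    public static void main(String[] args) {
        checkLimits(1000, 12, 5, 1); // 1012 columns: exactly at the limit, OK
        try {
            checkLimits(1000, 13, 5, 1);
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // too many columns: 1013
        }
    }
}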