
Example 26 with ConstraintDescriptor

use of org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor in project derby by apache.

the class DropConstraintConstantAction method executeConstantAction.

// INTERFACE METHODS
/**
 *	This is the guts of the Execution-time logic for DROP CONSTRAINT.
 *
 *	@see ConstantAction#executeConstantAction
 *
 * @exception StandardException		Thrown on failure
 */
public void executeConstantAction(Activation activation) throws StandardException {
    ConstraintDescriptor conDesc = null;
    TableDescriptor td;
    UUID indexId = null;
    String indexUUIDString;
    LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    DataDictionary dd = lcc.getDataDictionary();
    DependencyManager dm = dd.getDependencyManager();
    TransactionController tc = lcc.getTransactionExecute();
    /*
		** Inform the data dictionary that we are about to write to it.
		** There are several calls to data dictionary "get" methods here
		** that might be done in "read" mode in the data dictionary, but
		** it seemed safer to do this whole operation in "write" mode.
		**
		** We tell the data dictionary we're done writing at the end of
		** the transaction.
		*/
    dd.startWriting(lcc);
    td = dd.getTableDescriptor(tableId);
    if (td == null) {
        throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tableName);
    }
    /* Table gets locked in AlterTableConstantAction */
    /*
		** If the schema descriptor is null, then
		** we must have just read ourselves in.  
		** So we will get the corresponding schema
		** descriptor from the data dictionary.
		*/
    SchemaDescriptor tdSd = td.getSchemaDescriptor();
    SchemaDescriptor constraintSd = constraintSchemaName == null ? tdSd : dd.getSchemaDescriptor(constraintSchemaName, tc, true);
    /* Get the constraint descriptor for the index, along
		 * with an exclusive row lock on the row in sys.sysconstraints
		 * in order to ensure that no one else compiles against the
		 * index.
		 */
    // A null constraint name means "alter table drop primary key".
    if (constraintName == null)
        conDesc = dd.getConstraintDescriptors(td).getPrimaryKey();
    else
        conDesc = dd.getConstraintDescriptorByName(td, constraintSd, constraintName, true);
    // Error if constraint doesn't exist
    if (conDesc == null) {
        String errorName = constraintName == null ? "PRIMARY KEY" : (constraintSd.getSchemaName() + "." + constraintName);
        throw StandardException.newException(SQLState.LANG_DROP_OR_ALTER_NON_EXISTING_CONSTRAINT, errorName, td.getQualifiedName());
    }
    switch(verifyType) {
        case DataDictionary.UNIQUE_CONSTRAINT:
            if (conDesc.getConstraintType() != verifyType)
                throw StandardException.newException(SQLState.LANG_DROP_CONSTRAINT_TYPE, constraintName, "UNIQUE");
            break;
        case DataDictionary.CHECK_CONSTRAINT:
            if (conDesc.getConstraintType() != verifyType)
                throw StandardException.newException(SQLState.LANG_DROP_CONSTRAINT_TYPE, constraintName, "CHECK");
            break;
        case DataDictionary.FOREIGNKEY_CONSTRAINT:
            if (conDesc.getConstraintType() != verifyType)
                throw StandardException.newException(SQLState.LANG_DROP_CONSTRAINT_TYPE, constraintName, "FOREIGN KEY");
            break;
    }
    boolean cascadeOnRefKey = (cascade && conDesc instanceof ReferencedKeyConstraintDescriptor);
    if (!cascadeOnRefKey) {
        dm.invalidateFor(conDesc, DependencyManager.DROP_CONSTRAINT, lcc);
    }
    /*
		** If we had a primary/unique key and it is drop cascade,	
		** drop all the referencing keys now.  We MUST do this AFTER
		** dropping the referenced key because otherwise we would
		** be repeatedly changing the reference count of the referenced
		** key and generating unnecessary I/O.
		*/
    dropConstraint(conDesc, activation, lcc, !cascadeOnRefKey);
    if (cascadeOnRefKey) {
        ForeignKeyConstraintDescriptor fkcd;
        ReferencedKeyConstraintDescriptor cd;
        ConstraintDescriptorList cdl;
        cd = (ReferencedKeyConstraintDescriptor) conDesc;
        cdl = cd.getForeignKeyConstraints(ReferencedKeyConstraintDescriptor.ALL);
        int cdlSize = cdl.size();
        for (int index = 0; index < cdlSize; index++) {
            fkcd = (ForeignKeyConstraintDescriptor) cdl.elementAt(index);
            dm.invalidateFor(fkcd, DependencyManager.DROP_CONSTRAINT, lcc);
            dropConstraint(fkcd, activation, lcc, true);
        }
        /*
			** We told dropConstraintAndIndex not to
			** remove our dependencies, so send an invalidate,
			** and drop the dependencies.
			*/
        dm.invalidateFor(conDesc, DependencyManager.DROP_CONSTRAINT, lcc);
        dm.clearDependencies(lcc, conDesc);
    }
}
Also used : SchemaDescriptor(org.apache.derby.iapi.sql.dictionary.SchemaDescriptor) DependencyManager(org.apache.derby.iapi.sql.depend.DependencyManager) ConstraintDescriptorList(org.apache.derby.iapi.sql.dictionary.ConstraintDescriptorList) DataDictionary(org.apache.derby.iapi.sql.dictionary.DataDictionary) ForeignKeyConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ForeignKeyConstraintDescriptor) TableDescriptor(org.apache.derby.iapi.sql.dictionary.TableDescriptor) LanguageConnectionContext(org.apache.derby.iapi.sql.conn.LanguageConnectionContext) ReferencedKeyConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ReferencedKeyConstraintDescriptor) ConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor) UUID(org.apache.derby.catalog.UUID) TransactionController(org.apache.derby.iapi.store.access.TransactionController)
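For context, the DDL below is the kind of statement that ends up in DropConstraintConstantAction.executeConstantAction. This is a minimal, self-contained JDBC sketch against an embedded in-memory Derby database, not code from the Derby repository; the table and constraint names are invented for illustration, and the Derby engine jar is assumed to be on the classpath.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropConstraintDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demoDb;create=true");
             Statement s = conn.createStatement()) {

            s.executeUpdate("CREATE TABLE ORDERS (ID INT NOT NULL, QTY INT, "
                    + "CONSTRAINT ORDERS_PK PRIMARY KEY (ID), "
                    + "CONSTRAINT QTY_POSITIVE CHECK (QTY > 0))");

            // Named constraint: executeConstantAction looks it up by schema and name.
            s.executeUpdate("ALTER TABLE ORDERS DROP CONSTRAINT QTY_POSITIVE");

            // The constraintName == null path: "alter table drop primary key".
            s.executeUpdate("ALTER TABLE ORDERS DROP PRIMARY KEY");
        }
    }
}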

Example 27 with ConstraintDescriptor

use of org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor in project derby by apache.

the class UpdateNode method getUpdateReadMap.

/**
 *	Builds a bitmap of all columns which should be read from the
 *	Store in order to satisfy an UPDATE statement.
 *
 *	Is passed a list of updated columns. Does the following:
 *
 *	1)	finds all indices which overlap the updated columns
 *	2)	adds the index columns to a bitmap of affected columns
 *	3)	adds the index descriptors to a list of conglomerate
 *		descriptors.
 *	4)	finds all constraints which overlap the updated columns
 *		and adds the constrained columns to the bitmap
 *	5)	finds all triggers which overlap the updated columns.
 *	6)	goes through all the triggers from step 5 and, for each one,
 *     follows the rules below to decide which columns should be read.
 *       Rule 1) If trigger column information is null, then read all the
 *       columns from the trigger table into memory, irrespective of whether
 *       there is any trigger action column information. Two examples of
 *       such triggers:
 *         create trigger tr1 after update on t1 for each row values(1);
 *         create trigger tr1 after update on t1 referencing old as oldt
 *         	for each row insert into t2 values(2,oldt.j,-2);
 *       Rule 2) If trigger column information is available but no trigger
 *       action column information is found and no REFERENCING clause is
 *       used for the trigger, then read all the columns identified by
 *       the trigger columns. e.g.
 *         create trigger tr1 after update of c1 on t1
 *         	for each row values(1);
 *       Rule 3) If trigger column information and trigger action column
 *       information are both non-null, then only those columns will be
 *       read into memory. This is possible only for triggers created in
 *       release 10.9 or higher (with the exception of 10.7.1.1, where we
 *       did collect that information, but because of the corruption caused
 *       by those changes we do not use the information collected by
 *       10.7). Starting with 10.9, we collect trigger action column
 *       information so we can be smart about which columns get read
 *       during trigger execution. e.g.
 *         create trigger tr1 after update of c1 on t1
 *         	referencing old as oldt for each row
 *         	insert into t2 values(2,oldt.j,-2);
 *       Rule 4) If trigger column information is available and no trigger
 *       action column information is found but a REFERENCING clause is used
 *       for the trigger, then read all the columns from the trigger
 *       table. This covers the soft-upgrade scenario for triggers created
 *       prior to 10.9.
 *       e.g. a trigger created prior to 10.9:
 *         create trigger tr1 after update of c1 on t1
 *         	referencing old as oldt for each row
 *         	insert into t2 values(2,oldt.j,-2);
 *	7)	adds the triggers to an evolving list of triggers
 *	8)	finds all generated columns whose generation clauses mention
 *        the updated columns and adds all of the mentioned columns
 *
 *	@param	dd	Data Dictionary
 *	@param	baseTable	Table on which update is issued
 *	@param	updateColumnList	a list of updated columns
 * @param  conglomerates       OUT: list of affected indices
 *	@param	relevantConstraints	IN/OUT. Empty list is passed in. We hang constraints on it as we go.
 *	@param	relevantTriggers	IN/OUT. Passed in as an empty list. Filled in as we go.
 *	@param	needsDeferredProcessing	IN/OUT. true if the statement already needs
 *									deferred processing. set while evaluating this
 *									routine if a trigger or constraint requires
 *									deferred processing
 *	@param	affectedGeneratedColumns columns whose generation clauses mention updated columns
 *
 * @return a FormatableBitSet of columns to be read out of the base table
 *
 * @exception StandardException		Thrown on error
 */
static FormatableBitSet getUpdateReadMap(DataDictionary dd, TableDescriptor baseTable, ResultColumnList updateColumnList, List<ConglomerateDescriptor> conglomerates, ConstraintDescriptorList relevantConstraints, TriggerDescriptorList relevantTriggers, boolean[] needsDeferredProcessing, ColumnDescriptorList affectedGeneratedColumns) throws StandardException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(updateColumnList != null, "updateColumnList is null");
    }
    int columnCount = baseTable.getMaxColumnID();
    FormatableBitSet columnMap = new FormatableBitSet(columnCount + 1);
    /*
		** Add all the changed columns.  We don't strictly
		** need the before image of the changed column in all cases,
		** but it makes life much easier since things are set
		** up around the assumption that we have the before
		** and after image of the column.
		*/
    int[] changedColumnIds = updateColumnList.sortMe();
    for (int ix = 0; ix < changedColumnIds.length; ix++) {
        columnMap.set(changedColumnIds[ix]);
    }
    /* 
		** Get a list of the indexes that need to be 
		** updated.  ColumnMap contains all indexed
		** columns where 1 or more columns in the index
		** are going to be modified.
		*/
    DMLModStatementNode.getXAffectedIndexes(baseTable, updateColumnList, columnMap, conglomerates);
    /* 
		** Add all columns needed for constraints.  We don't
		** need to bother with foreign key/primary key constraints
		** because they are added as a side effect of adding
		** their indexes above.
		*/
    baseTable.getAllRelevantConstraints(StatementType.UPDATE, changedColumnIds, needsDeferredProcessing, relevantConstraints);
    int rclSize = relevantConstraints.size();
    for (int index = 0; index < rclSize; index++) {
        ConstraintDescriptor cd = relevantConstraints.elementAt(index);
        if (cd.getConstraintType() != DataDictionary.CHECK_CONSTRAINT) {
            continue;
        }
        int[] refColumns = ((CheckConstraintDescriptor) cd).getReferencedColumns();
        for (int i = 0; i < refColumns.length; i++) {
            columnMap.set(refColumns[i]);
        }
    }
    // 
    // Add all columns mentioned by generation clauses which are affected
    // by the columns being updated.
    // 
    addGeneratedColumnPrecursors(baseTable, affectedGeneratedColumns, columnMap);
    /*
	 	* If we have any UPDATE triggers, then we will follow the 4 rules
	 	* mentioned in the comments at the method level.
	 	*/
    baseTable.getAllRelevantTriggers(StatementType.UPDATE, changedColumnIds, relevantTriggers);
    if (relevantTriggers.size() > 0) {
        needsDeferredProcessing[0] = true;
        boolean needToIncludeAllColumns = false;
        // If we are dealing with a database created in 10.8 or earlier,
        // then we must be in soft upgrade mode. For such databases,
        // we do not want to do any column reading optimization.
        // 
        // For triggers created in 10.7.1.1, we kept track of trigger
        // action columns used through the REFERENCING clause. That
        // information was gathered so we could be smart about what
        // columns from trigger table should be read during trigger
        // execution. But those code changes resulted in data
        // corruption (DERBY-5121). Because of that, we took the
        // column read optimization out of the next 10.7 release
        // and out of the 10.8 codeline.
        // But we can still have triggers created in 10.7.1.1 with
        // trigger action column information in SYSTRIGGERS.
        // In 10.9, we are reimplementing what columns should be read
        // from the trigger table during trigger execution. But we do
        // not want these column optimization changes to be used in soft
        // upgrade mode for a 10.8 or earlier database, so that the user
        // can still go back to the older release after the soft upgrade.
        boolean in10_9_orHigherVersion = dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_9, null);
        for (TriggerDescriptor trd : relevantTriggers) {
            if (in10_9_orHigherVersion) {
                // See if we can avoid reading all the columns from the
                // trigger table.
                int[] referencedColsInTriggerAction = trd.getReferencedColsInTriggerAction();
                int[] triggerCols = trd.getReferencedCols();
                if (triggerCols == null || triggerCols.length == 0) {
                    for (int i = 0; i < columnCount; i++) {
                        columnMap.set(i + 1);
                    }
                    // No need to look at the remaining triggers; we are going to read all the columns anyway.
                    break;
                } else {
                    if (referencedColsInTriggerAction == null || referencedColsInTriggerAction.length == 0) {
                        // Does this trigger have a REFERENCING clause defined on it?
                        if (!trd.getReferencingNew() && !trd.getReferencingOld()) {
                            // No REFERENCING clause, so read just the trigger columns.
                            for (int ix = 0; ix < triggerCols.length; ix++) {
                                columnMap.set(triggerCols[ix]);
                            }
                        } else {
                            // The trigger has a REFERENCING clause defined on it,
                            // so it might be using those columns in the trigger action.
                            // We should just go ahead and read all the
                            // columns from the trigger table. Now, there is
                            // no need to go through the rest of the triggers
                            // because we are going to read all the columns
                            // anyways.
                            needToIncludeAllColumns = true;
                            break;
                        }
                    } else {
                        // Read the trigger columns and the trigger action columns from the trigger table for the trigger execution.
                        for (int ix = 0; ix < triggerCols.length; ix++) {
                            columnMap.set(triggerCols[ix]);
                        }
                        for (int ix = 0; ix < referencedColsInTriggerAction.length; ix++) {
                            columnMap.set(referencedColsInTriggerAction[ix]);
                        }
                    }
                }
            } else {
                // Does this trigger have a REFERENCING clause defined on it?
                if (!trd.getReferencingNew() && !trd.getReferencingOld())
                    continue;
                else {
                    needToIncludeAllColumns = true;
                    break;
                }
            }
        }
        if (needToIncludeAllColumns) {
            for (int i = 1; i <= columnCount; i++) {
                columnMap.set(i);
            }
        }
    }
    return columnMap;
}
Also used : CheckConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.CheckConstraintDescriptor) ConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor) FormatableBitSet(org.apache.derby.iapi.services.io.FormatableBitSet) TriggerDescriptor(org.apache.derby.iapi.sql.dictionary.TriggerDescriptor)
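A quick illustration of the 1-based column bitmap that getUpdateReadMap returns: the FormatableBitSet is sized columnCount + 1 so that bit N maps to column N and bit 0 is never used. This is a standalone sketch rather than Derby test code; the column positions are invented, and only the constructor and set() calls already shown above are used. It needs the Derby engine jar on the classpath.

import org.apache.derby.iapi.services.io.FormatableBitSet;

public class ReadMapSketch {
    public static void main(String[] args) {
        int columnCount = 5;                       // pretend the base table has 5 columns
        FormatableBitSet columnMap = new FormatableBitSet(columnCount + 1);

        columnMap.set(2);                          // an updated column
        columnMap.set(4);                          // a column referenced by a CHECK constraint

        // Bits 2 and 4 are now set; bit 0 stays clear because column IDs are 1-based.
        System.out.println(columnMap);
    }
}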

Example 28 with ConstraintDescriptor

use of org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor in project derby by apache.

the class RenameNode method renameTableBind.

// do any checking that needs to be done at bind time for rename table
private void renameTableBind(DataDictionary dd) throws StandardException {
    /* Verify that there are no check constraints on the table */
    ConstraintDescriptorList constraintDescriptorList = dd.getConstraintDescriptors(td);
    int size = constraintDescriptorList == null ? 0 : constraintDescriptorList.size();
    ConstraintDescriptor constraintDescriptor;
    // go through all the constraints defined on the table
    for (int index = 0; index < size; index++) {
        constraintDescriptor = constraintDescriptorList.elementAt(index);
        // if it is a check constraint, error
        if (constraintDescriptor.getConstraintType() == DataDictionary.CHECK_CONSTRAINT) {
            throw StandardException.newException(SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT, "RENAME", td.getName(), "CONSTRAINT", constraintDescriptor.getConstraintName());
        }
    }
}
Also used : ConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor) ConstraintDescriptorList(org.apache.derby.iapi.sql.dictionary.ConstraintDescriptorList)
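The bind-time check above is why RENAME TABLE is rejected when the table has a check constraint. A minimal JDBC sketch of that behavior, with invented names and an in-memory database (not Derby's own test code):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class RenameWithCheckConstraint {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:renameDb;create=true");
             Statement s = conn.createStatement()) {

            s.executeUpdate("CREATE TABLE T1 (C1 INT CONSTRAINT C1_POSITIVE CHECK (C1 > 0))");

            try {
                s.executeUpdate("RENAME TABLE T1 TO T2");   // renameTableBind raises the error
            } catch (SQLException e) {
                // Derby reports that the constraint depends on the table being renamed.
                System.out.println(e.getSQLState() + ": " + e.getMessage());
            }
        }
    }
}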

Example 29 with ConstraintDescriptor

use of org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor in project derby by apache.

the class SYSCONSTRAINTSRowFactory method buildDescriptor.

// /////////////////////////////////////////////////////////////////////////
// 
// ABSTRACT METHODS TO BE IMPLEMENTED BY CHILDREN OF CatalogRowFactory
// 
// /////////////////////////////////////////////////////////////////////////
/**
 * Make a ConstraintDescriptor out of a SYSCONSTRAINTS row
 *
 * @param row a SYSCONSTRAINTS row
 * @param parentTupleDescriptor	Subconstraint descriptor with auxiliary info.
 * @param dd dataDictionary
 *
 * @exception   StandardException thrown on failure
 */
public TupleDescriptor buildDescriptor(ExecRow row, TupleDescriptor parentTupleDescriptor, DataDictionary dd) throws StandardException {
    ConstraintDescriptor constraintDesc = null;
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(row.nColumns() == SYSCONSTRAINTS_COLUMN_COUNT, "Wrong number of columns for a SYSCONSTRAINTS row");
    }
    DataValueDescriptor col;
    ConglomerateDescriptor conglomDesc;
    DataDescriptorGenerator ddg;
    TableDescriptor td = null;
    int constraintIType = -1;
    int[] keyColumns = null;
    UUID constraintUUID;
    UUID schemaUUID;
    UUID tableUUID;
    UUID referencedConstraintId = null;
    SchemaDescriptor schema;
    String tableUUIDString;
    String constraintName;
    String constraintSType;
    String constraintStateStr;
    boolean deferrable = ConstraintDefinitionNode.DEFERRABLE_DEFAULT;
    boolean initiallyDeferred = ConstraintDefinitionNode.INITIALLY_DEFERRED_DEFAULT;
    boolean enforced = ConstraintDefinitionNode.ENFORCED_DEFAULT;
    int referenceCount;
    String constraintUUIDString;
    String schemaUUIDString;
    SubConstraintDescriptor scd;
    if (SanityManager.DEBUG) {
        if (!(parentTupleDescriptor instanceof SubConstraintDescriptor)) {
            SanityManager.THROWASSERT("parentTupleDescriptor expected to be instanceof " + "SubConstraintDescriptor, not " + parentTupleDescriptor.getClass().getName());
        }
    }
    scd = (SubConstraintDescriptor) parentTupleDescriptor;
    ddg = dd.getDataDescriptorGenerator();
    /* 1st column is CONSTRAINTID (UUID - char(36)) */
    col = row.getColumn(SYSCONSTRAINTS_CONSTRAINTID);
    constraintUUIDString = col.getString();
    constraintUUID = getUUIDFactory().recreateUUID(constraintUUIDString);
    /* 2nd column is TABLEID (UUID - char(36)) */
    col = row.getColumn(SYSCONSTRAINTS_TABLEID);
    tableUUIDString = col.getString();
    tableUUID = getUUIDFactory().recreateUUID(tableUUIDString);
    /* Get the TableDescriptor.  
		 * It may be cached in the SCD, 
		 * otherwise we need to go to the
		 * DD.
		 */
    if (scd != null) {
        td = scd.getTableDescriptor();
    }
    if (td == null) {
        td = dd.getTableDescriptor(tableUUID);
    }
    /* 3rd column is NAME (varchar(128)) */
    col = row.getColumn(SYSCONSTRAINTS_CONSTRAINTNAME);
    constraintName = col.getString();
    /* 4th column is TYPE (char(1)) */
    col = row.getColumn(SYSCONSTRAINTS_TYPE);
    constraintSType = col.getString();
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(constraintSType.length() == 1, "Fourth column type incorrect");
    }
    boolean typeSet = false;
    switch(constraintSType.charAt(0)) {
        case 'P':
            constraintIType = DataDictionary.PRIMARYKEY_CONSTRAINT;
            typeSet = true;
        case 'U':
            if (!typeSet) {
                constraintIType = DataDictionary.UNIQUE_CONSTRAINT;
                typeSet = true;
            }
        case 'F':
            if (!typeSet)
                constraintIType = DataDictionary.FOREIGNKEY_CONSTRAINT;
            if (SanityManager.DEBUG) {
                if (!(parentTupleDescriptor instanceof SubKeyConstraintDescriptor)) {
                    SanityManager.THROWASSERT("parentTupleDescriptor expected to be instanceof " + "SubKeyConstraintDescriptor, not " + parentTupleDescriptor.getClass().getName());
                }
            }
            conglomDesc = td.getConglomerateDescriptor(((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId());
            /* Take care of the rare case of conglomDesc being null.  The
				 * reason is that our "td" is out of date.  Another thread
				 * which was adding a constraint committed between the moment
				 * we got the table descriptor (conglomerate list) and the
				 * moment we scanned and got the constraint desc list.  Since
				 * that thread just added a new row to SYSCONGLOMERATES, 
				 * SYSCONSTRAINTS, etc.  We wouldn't have wanted to lock the
				 * system tables just to prevent other threads from adding new
				 * rows.
				 */
            if (conglomDesc == null) {
                // We can't be getting td from the cache, because if we are
                // here we must have been in the data dictionary's DDL mode
                // (that's why the DDL thread got through). We are not done
                // yet, so the dd ref count is not 0 and it couldn't have
                // turned into COMPILE_ONLY mode.
                td = dd.getTableDescriptor(tableUUID);
                if (scd != null)
                    scd.setTableDescriptor(td);
                // try again now
                conglomDesc = td.getConglomerateDescriptor(((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId());
            }
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(conglomDesc != null, "conglomDesc is expected to be non-null for backing index");
            }
            keyColumns = conglomDesc.getIndexDescriptor().baseColumnPositions();
            referencedConstraintId = ((SubKeyConstraintDescriptor) parentTupleDescriptor).getKeyConstraintId();
            break;
        case 'C':
            constraintIType = DataDictionary.CHECK_CONSTRAINT;
            if (SanityManager.DEBUG) {
                if (!(parentTupleDescriptor instanceof SubCheckConstraintDescriptor)) {
                    SanityManager.THROWASSERT("parentTupleDescriptor expected to be instanceof " + "SubCheckConstraintDescriptor, not " + parentTupleDescriptor.getClass().getName());
                }
            }
            break;
        default:
            if (SanityManager.DEBUG) {
                SanityManager.THROWASSERT("Fourth column value invalid");
            }
    }
    /* 5th column is SCHEMAID (UUID - char(36)) */
    col = row.getColumn(SYSCONSTRAINTS_SCHEMAID);
    schemaUUIDString = col.getString();
    schemaUUID = getUUIDFactory().recreateUUID(schemaUUIDString);
    schema = dd.getSchemaDescriptor(schemaUUID, null);
    /* 6th column is STATE (char(1)) */
    col = row.getColumn(SYSCONSTRAINTS_STATE);
    constraintStateStr = col.getString();
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(constraintStateStr.length() == 1, "Sixth column (state) type incorrect");
    }
    // 
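    // STATE column encoding, as decoded by the cases below
    // (deferrable / initiallyDeferred / enforced):
    //   'E' = false/false/true    'D' = false/false/false
    //   'e' = true/true/true      'd' = true/true/false
    //   'i' = true/false/true     'j' = true/false/false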
    switch(constraintStateStr.charAt(0)) {
        case 'E':
            deferrable = false;
            initiallyDeferred = false;
            enforced = true;
            break;
        case 'D':
            deferrable = false;
            initiallyDeferred = false;
            enforced = false;
            break;
        case 'e':
            deferrable = true;
            initiallyDeferred = true;
            enforced = true;
            break;
        case 'd':
            deferrable = true;
            initiallyDeferred = true;
            enforced = false;
            break;
        case 'i':
            deferrable = true;
            initiallyDeferred = false;
            enforced = true;
            break;
        case 'j':
            deferrable = true;
            initiallyDeferred = false;
            enforced = false;
            break;
        default:
            if (SanityManager.DEBUG) {
                SanityManager.THROWASSERT("Invalid state value '" + constraintStateStr + "' for constraint");
            }
    }
    /* 7th column is REFERENCECOUNT, int */
    col = row.getColumn(SYSCONSTRAINTS_REFERENCECOUNT);
    referenceCount = col.getInt();
    switch(constraintIType) {
        case DataDictionary.PRIMARYKEY_CONSTRAINT:
            constraintDesc = ddg.newPrimaryKeyConstraintDescriptor(td, constraintName, deferrable, initiallyDeferred, // genReferencedColumns(dd, td), //int referencedColumns[],
            keyColumns, constraintUUID, ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(), schema, enforced, referenceCount);
            break;
        case DataDictionary.UNIQUE_CONSTRAINT:
            constraintDesc = ddg.newUniqueConstraintDescriptor(td, constraintName, deferrable, initiallyDeferred, // genReferencedColumns(dd, td), //int referencedColumns[],
            keyColumns, constraintUUID, ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(), schema, enforced, referenceCount);
            break;
        case DataDictionary.FOREIGNKEY_CONSTRAINT:
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(referenceCount == 0, "REFERENCECOUNT column is nonzero for fk constraint");
            }
            constraintDesc = ddg.newForeignKeyConstraintDescriptor(td, constraintName, deferrable, initiallyDeferred, // genReferencedColumns(dd, td), //int referencedColumns[],
            keyColumns, constraintUUID, ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(), schema, referencedConstraintId, enforced, ((SubKeyConstraintDescriptor) parentTupleDescriptor).getRaDeleteRule(), ((SubKeyConstraintDescriptor) parentTupleDescriptor).getRaUpdateRule());
            break;
        case DataDictionary.CHECK_CONSTRAINT:
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(referenceCount == 0, "REFERENCECOUNT column is nonzero for check constraint");
            }
            constraintDesc = ddg.newCheckConstraintDescriptor(td, constraintName, deferrable, initiallyDeferred, constraintUUID, ((SubCheckConstraintDescriptor) parentTupleDescriptor).getConstraintText(), ((SubCheckConstraintDescriptor) parentTupleDescriptor).getReferencedColumnsDescriptor(), schema, enforced);
            break;
    }
    return constraintDesc;
}
Also used : SchemaDescriptor(org.apache.derby.iapi.sql.dictionary.SchemaDescriptor) SubCheckConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.SubCheckConstraintDescriptor) ConglomerateDescriptor(org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor) TableDescriptor(org.apache.derby.iapi.sql.dictionary.TableDescriptor) DataDescriptorGenerator(org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator) SubConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.SubConstraintDescriptor) ConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor) SubKeyConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.SubKeyConstraintDescriptor) DataValueDescriptor(org.apache.derby.iapi.types.DataValueDescriptor) UUID(org.apache.derby.catalog.UUID)
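The row layout decoded above can be inspected directly by querying the SYS.SYSCONSTRAINTS catalog. A small JDBC sketch (in-memory database and invented table names; not Derby test code):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ListConstraints {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:catDb;create=true");
             Statement s = conn.createStatement()) {

            s.executeUpdate("CREATE TABLE PARENT (ID INT NOT NULL PRIMARY KEY)");
            s.executeUpdate("CREATE TABLE CHILD (PID INT REFERENCES PARENT (ID), "
                    + "QTY INT CHECK (QTY >= 0))");

            // TYPE and STATE hold the single-character codes that buildDescriptor decodes.
            try (ResultSet rs = s.executeQuery(
                    "SELECT CONSTRAINTNAME, TYPE, STATE, REFERENCECOUNT FROM SYS.SYSCONSTRAINTS")) {
                while (rs.next()) {
                    System.out.printf("%s  type=%s  state=%s  refcount=%d%n",
                            rs.getString(1), rs.getString(2), rs.getString(3), rs.getInt(4));
                }
            }
        }
    }
}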

Example 30 with ConstraintDescriptor

use of org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor in project derby by apache.

the class InsertResultSet method setUpAllSorts.

/**
 * Set up to update all of the indexes on a table when doing a bulk insert
 * on an empty table.
 *
 * @exception StandardException					thrown on error
 */
private void setUpAllSorts(ExecRow sourceRow, RowLocation rl) throws StandardException {
    int numIndexes = constants.irgs.length;
    int numColumns = td.getNumberOfColumns();
    ordering = new ColumnOrdering[numIndexes][];
    collation = new int[numIndexes][];
    needToDropSort = new boolean[numIndexes];
    sortIds = new long[numIndexes];
    rowSources = new RowLocationRetRowSource[numIndexes];
    // indexedCols is 1-based
    indexedCols = new FormatableBitSet(numColumns + 1);
    /* For each index, build a single index row, collation template,
         * and a sorter. 
         */
    for (int index = 0; index < numIndexes; index++) {
        // Update the bit map of indexed columns
        int[] keyColumns = constants.irgs[index].baseColumnPositions();
        for (int i2 = 0; i2 < keyColumns.length; i2++) {
            // indexedCols is 1-based
            indexedCols.set(keyColumns[i2]);
        }
        // create a single index row template for each index
        indexRows[index] = constants.irgs[index].getIndexRowTemplate();
        // Get an index row based on the base row
        // (This call is only necessary here because we need to
        // pass a template to the sorter.)
        constants.irgs[index].getIndexRow(sourceRow, rl, indexRows[index], (FormatableBitSet) null);
        /* For non-unique indexes, we order by all columns + the RID.
			 * For unique indexes, we just order by the columns.
			 * We create a unique index observer for unique indexes
			 * so that we can catch duplicate keys
			 */
        // Get the ConglomerateDescriptor for the index
        ConglomerateDescriptor cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
        int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
        boolean[] isAscending = constants.irgs[index].isAscending();
        int numColumnOrderings;
        SortObserver sortObserver;
        /* We can only reuse the wrappers when doing an
			 * external sort if there is only 1 index.  Otherwise,
			 * we could get in a situation where 1 sort reuses a
			 * wrapper that is still in use in another sort.
			 */
        boolean reuseWrappers = (numIndexes == 1);
        final IndexRowGenerator indDes = cd.getIndexDescriptor();
        Properties sortProperties = null;
        String indexOrConstraintName = cd.getConglomerateName();
        boolean deferred = false;
        boolean deferrable = false;
        UUID uniqueDeferrableConstraintId = null;
        if (cd.isConstraint()) {
            // so, the index is backing up a constraint
            ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
            indexOrConstraintName = conDesc.getConstraintName();
            deferred = lcc.isEffectivelyDeferred(lcc.getCurrentSQLSessionContext(activation), conDesc.getUUID());
            deferrable = conDesc.deferrable();
            uniqueDeferrableConstraintId = conDesc.getUUID();
        }
        if (indDes.isUnique() || indDes.isUniqueDeferrable()) {
            numColumnOrderings = indDes.isUnique() ? baseColumnPositions.length : baseColumnPositions.length + 1;
            sortObserver = new UniqueIndexSortObserver(lcc, uniqueDeferrableConstraintId, // don't clone rows
            false, deferrable, deferred, indexOrConstraintName, indexRows[index], reuseWrappers, td.getName());
        } else if (indDes.isUniqueWithDuplicateNulls()) {
            numColumnOrderings = baseColumnPositions.length + 1;
            // tell transaction controller to use the unique with
            // duplicate nulls sorter, when making createSort() call.
            sortProperties = new Properties();
            sortProperties.put(AccessFactoryGlobals.IMPL_TYPE, AccessFactoryGlobals.SORT_UNIQUEWITHDUPLICATENULLS_EXTERNAL);
            // use sort operator which treats nulls unequal
            sortObserver = new UniqueWithDuplicateNullsIndexSortObserver(lcc, uniqueDeferrableConstraintId, true, deferrable, deferred, indexOrConstraintName, indexRows[index], true, td.getName());
        } else {
            numColumnOrderings = baseColumnPositions.length + 1;
            sortObserver = new BasicSortObserver(false, false, indexRows[index], reuseWrappers);
        }
        ordering[index] = new ColumnOrdering[numColumnOrderings];
        for (int ii = 0; ii < isAscending.length; ii++) {
            ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
        }
        if (numColumnOrderings > isAscending.length) {
            ordering[index][isAscending.length] = new IndexColumnOrder(isAscending.length);
        }
        // set collation templates for later index creation
        // call (createAndLoadConglomerate())
        collation[index] = constants.irgs[index].getColumnCollationIds(td.getColumnDescriptorList());
        // create the sorters
        sortIds[index] = tc.createSort(sortProperties, indexRows[index].getRowArrayClone(), ordering[index], sortObserver, // not in order
        false, // est rows
        (int) sourceResultSet.getEstimatedRowCount(), // est row size, -1 means no idea
        -1);
        needToDropSort[index] = true;
    }
    sorters = new SortController[numIndexes];
    // Open the sorts
    for (int index = 0; index < numIndexes; index++) {
        sorters[index] = tc.openSort(sortIds[index]);
        needToDropSort[index] = true;
    }
}
Also used : Properties(java.util.Properties) LanguageProperties(org.apache.derby.iapi.sql.LanguageProperties) ConglomerateDescriptor(org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor) SortObserver(org.apache.derby.iapi.store.access.SortObserver) IndexRowGenerator(org.apache.derby.iapi.sql.dictionary.IndexRowGenerator) ConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor) FormatableBitSet(org.apache.derby.iapi.services.io.FormatableBitSet) UUID(org.apache.derby.catalog.UUID)
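The per-index sort setup above is only reached on a bulk insert into an empty table. One way to drive that path, as I understand it, is Derby's import procedure, which performs its inserts in bulk-insert mode; the table name and CSV path below are placeholders, and the sketch assumes an embedded in-memory database with the Derby engine jar on the classpath.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.Statement;

public class BulkLoadDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:loadDb;create=true");
             Statement s = conn.createStatement()) {

            // An empty table with a backing index: setUpAllSorts builds one sorter per index.
            s.executeUpdate("CREATE TABLE STAFF (ID INT NOT NULL PRIMARY KEY, NAME VARCHAR(64))");

            // SYSCS_IMPORT_TABLE(schemaName, tableName, fileName,
            //                    columnDelimiter, characterDelimiter, codeset, replace)
            try (PreparedStatement ps = conn.prepareStatement(
                    "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(NULL, 'STAFF', ?, NULL, NULL, NULL, 0)")) {
                ps.setString(1, "staff.csv");   // placeholder data file
                ps.execute();
            }
        }
    }
}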

Aggregations

ConstraintDescriptor (org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor) 34
ReferencedKeyConstraintDescriptor (org.apache.derby.iapi.sql.dictionary.ReferencedKeyConstraintDescriptor) 16
ConstraintDescriptorList (org.apache.derby.iapi.sql.dictionary.ConstraintDescriptorList) 15
DataDictionary (org.apache.derby.iapi.sql.dictionary.DataDictionary) 15
ConglomerateDescriptor (org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor) 12
ForeignKeyConstraintDescriptor (org.apache.derby.iapi.sql.dictionary.ForeignKeyConstraintDescriptor) 12
UUID (org.apache.derby.catalog.UUID) 10
LanguageConnectionContext (org.apache.derby.iapi.sql.conn.LanguageConnectionContext) 10
TransactionController (org.apache.derby.iapi.store.access.TransactionController) 10
FormatableBitSet (org.apache.derby.iapi.services.io.FormatableBitSet) 9
SchemaDescriptor (org.apache.derby.iapi.sql.dictionary.SchemaDescriptor) 9
TableDescriptor (org.apache.derby.iapi.sql.dictionary.TableDescriptor) 9
CheckConstraintDescriptor (org.apache.derby.iapi.sql.dictionary.CheckConstraintDescriptor) 8
DependencyManager (org.apache.derby.iapi.sql.depend.DependencyManager) 7
ColumnDescriptor (org.apache.derby.iapi.sql.dictionary.ColumnDescriptor) 7
ExecRow (org.apache.derby.iapi.sql.execute.ExecRow) 7
ColumnDescriptorList (org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList) 6
SubCheckConstraintDescriptor (org.apache.derby.iapi.sql.dictionary.SubCheckConstraintDescriptor) 5
SubConstraintDescriptor (org.apache.derby.iapi.sql.dictionary.SubConstraintDescriptor) 5
SubKeyConstraintDescriptor (org.apache.derby.iapi.sql.dictionary.SubKeyConstraintDescriptor) 5