Use of org.apache.derby.iapi.sql.dictionary.ForeignKeyConstraintDescriptor in the Apache Derby project.
The class DataDictionaryImpl, method dropSubKeyConstraint.
/**
* Drop the matching row from SYSKEYS (for a primary key or unique
* constraint) or SYSFOREIGNKEYS (for a foreign key constraint) when
* dropping the constraint.
*
* @param constraint the constraint
* @param tc The TransactionController
*
* @exception StandardException Thrown on failure
*/
private void dropSubKeyConstraint(ConstraintDescriptor constraint, TransactionController tc) throws StandardException {
ExecIndexRow keyRow1 = null;
DataValueDescriptor constraintIdOrderable;
TabInfoImpl ti;
int baseNum;
int indexNum;
if (constraint.getConstraintType() == DataDictionary.FOREIGNKEY_CONSTRAINT) {
baseNum = SYSFOREIGNKEYS_CATALOG_NUM;
indexNum = SYSFOREIGNKEYSRowFactory.SYSFOREIGNKEYS_INDEX1_ID;
/*
** If we have a foreign key, we need to decrement the
** reference count of the constraint that this FK references.
** We need to do this *before* we drop the foreign key
** because of the way FK.getReferencedConstraint() works.
*/
if (constraint.getConstraintType() == DataDictionary.FOREIGNKEY_CONSTRAINT) {
ReferencedKeyConstraintDescriptor refDescriptor = (ReferencedKeyConstraintDescriptor) getConstraintDescriptor(((ForeignKeyConstraintDescriptor) constraint).getReferencedConstraintId());
if (refDescriptor != null) {
refDescriptor.decrementReferenceCount();
int[] colsToSet = new int[1];
colsToSet[0] = SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_REFERENCECOUNT;
updateConstraintDescriptor(refDescriptor, refDescriptor.getUUID(), colsToSet, tc);
}
}
} else {
baseNum = SYSKEYS_CATALOG_NUM;
indexNum = SYSKEYSRowFactory.SYSKEYS_INDEX1_ID;
}
ti = getNonCoreTI(baseNum);
/* Use constraintIdOrderable in both start
* and stop position for index 1 scan.
*/
constraintIdOrderable = getIDValueAsCHAR(constraint.getUUID());
/* Set up the start/stop position for the scan */
keyRow1 = (ExecIndexRow) exFactory.getIndexableRow(1);
keyRow1.setColumn(1, constraintIdOrderable);
ti.deleteRow(tc, keyRow1, indexNum);
}
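A minimal, hypothetical JDBC sketch of the SQL-level operation that reaches this code path; the in-memory connection URL, table names, and constraint name are illustrative and not taken from the Derby source above. Dropping a FOREIGN KEY constraint deletes its row from SYS.SYSFOREIGNKEYS and decrements the reference count of the referenced PRIMARY KEY constraint, as dropSubKeyConstraint shows.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropForeignKeyExample {
    public static void main(String[] args) throws Exception {
        // Requires the Derby embedded driver on the classpath.
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo;create=true");
             Statement s = conn.createStatement()) {
            s.executeUpdate("CREATE TABLE parent(id INT NOT NULL PRIMARY KEY)");
            s.executeUpdate("CREATE TABLE child(id INT, pid INT, "
                    + "CONSTRAINT child_fk FOREIGN KEY (pid) REFERENCES parent(id))");
            // Dropping the FK: Derby removes the matching SYSFOREIGNKEYS row and
            // decrements the reference count of the referenced PRIMARY KEY constraint.
            s.executeUpdate("ALTER TABLE child DROP CONSTRAINT child_fk");
        }
    }
}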
Use of org.apache.derby.iapi.sql.dictionary.ForeignKeyConstraintDescriptor in the Apache Derby project.
The class CreateConstraintConstantAction, method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for CREATE CONSTRAINT.
* <P>
* A constraint is represented as:
* <UL>
* <LI> ConstraintDescriptor.
* </UL>
* If a backing index is required then the index will
* be created through an CreateIndexConstantAction setup
* by the compiler.
* <BR>
* Dependencies are created as:
* <UL>
* <LI> ConstraintDescriptor depends on all the providers collected
* at compile time and passed into the constructor.
* <LI> For a FOREIGN KEY constraint ConstraintDescriptor depends
* on the ConstraintDescriptor for the referenced constraints
* and the privileges required to create the constraint.
* </UL>
*
* @see ConstraintDescriptor
* @see CreateIndexConstantAction
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
ConglomerateDescriptor conglomDesc = null;
ConglomerateDescriptor[] conglomDescs = null;
ConstraintDescriptor conDesc = null;
TableDescriptor td = null;
UUID indexId = null;
String uniqueName;
String backingIndexName;
/* RESOLVE - blow off not null constraints for now (and probably forever) */
if (constraintType == DataDictionary.NOTNULL_CONSTRAINT) {
return;
}
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
cf = lcc.getLanguageConnectionFactory().getClassFactory();
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
/* Table gets locked in AlterTableConstantAction */
/*
** If the schema descriptor is null, then
** we must have just read ourselves in.
** So we will get the corresponding schema
** descriptor from the data dictionary.
*/
SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc, true);
/* Try to get the TableDescriptor from
* the Activation. We will go to the
* DD if not there. (It should always be
* there except when in a target.)
*/
td = activation.getDDLTableDescriptor();
if (td == null) {
/* tableId will be non-null if adding a
* constraint to an existing table.
*/
if (tableId != null) {
td = dd.getTableDescriptor(tableId);
} else {
td = dd.getTableDescriptor(tableName, sd, tc);
}
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tableName);
}
activation.setDDLTableDescriptor(td);
}
/* Generate the UUID for the constraint. If the constraint is backed by an
 * index and no index name was specified, a separately generated UUID string
 * becomes the backing index's name (see below).
 */
UUIDFactory uuidFactory = dd.getUUIDFactory();
UUID constrId = uuidFactory.createUUID();
/* Create the index, if there's one for this constraint */
if (indexAction != null) {
if (indexAction.getIndexName() == null) {
/* Set the index name */
backingIndexName = uuidFactory.createUUID().toString();
indexAction.setIndexName(backingIndexName);
} else {
backingIndexName = indexAction.getIndexName();
}
indexAction.setConstraintID(constrId);
/* Create the index */
indexAction.executeConstantAction(activation);
/* Get the conglomerate descriptor for the backing index */
conglomDescs = td.getConglomerateDescriptors();
for (int index = 0; index < conglomDescs.length; index++) {
conglomDesc = conglomDescs[index];
/* Check for conglomerate being an index first, since
* name is null for heap.
*/
if (conglomDesc.isIndex() && backingIndexName.equals(conglomDesc.getConglomerateName())) {
break;
}
}
if (SanityManager.DEBUG) {
SanityManager.ASSERT(conglomDesc != null, "conglomDesc is expected to be non-null after search for backing index");
SanityManager.ASSERT(conglomDesc.isIndex(), "conglomDesc is expected to be indexable after search for backing index");
SanityManager.ASSERT(conglomDesc.getConglomerateName().equals(backingIndexName), "conglomDesc name expected to be the same as backing index name after search for backing index");
}
indexId = conglomDesc.getUUID();
}
boolean[] defaults = new boolean[] { ConstraintDefinitionNode.DEFERRABLE_DEFAULT, ConstraintDefinitionNode.INITIALLY_DEFERRED_DEFAULT, ConstraintDefinitionNode.ENFORCED_DEFAULT };
for (int i = 0; i < characteristics.length; i++) {
if (characteristics[i] != defaults[i]) {
dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_11, "DEFERRED CONSTRAINTS");
if (constraintType == DataDictionary.NOTNULL_CONSTRAINT || !characteristics[2]) /* not enforced */
{
// Remove when feature DERBY-532 is completed
if (!PropertyUtil.getSystemProperty("derby.constraintsTesting", "false").equals("true")) {
throw StandardException.newException(SQLState.NOT_IMPLEMENTED, "non-default constraint characteristics");
}
}
}
}
/* Now, lets create the constraint descriptor */
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
switch(constraintType) {
case DataDictionary.PRIMARYKEY_CONSTRAINT:
conDesc = ddg.newPrimaryKeyConstraintDescriptor(td, constraintName,
characteristics[0], // deferrable
characteristics[1], // initiallyDeferred
genColumnPositions(td, false), // column positions
constrId, indexId, sd,
characteristics[2], // enforced
0); // referenceCount
dd.addConstraintDescriptor(conDesc, tc);
break;
case DataDictionary.UNIQUE_CONSTRAINT:
conDesc = ddg.newUniqueConstraintDescriptor(td, constraintName,
characteristics[0], // deferrable
characteristics[1], // initiallyDeferred
genColumnPositions(td, false), // column positions
constrId, indexId, sd,
characteristics[2], // enforced
0); // referenceCount
dd.addConstraintDescriptor(conDesc, tc);
break;
case DataDictionary.CHECK_CONSTRAINT:
conDesc = ddg.newCheckConstraintDescriptor(td, constraintName,
characteristics[0], // deferrable
characteristics[1], // initiallyDeferred
constrId, constraintText,
new ReferencedColumnsDescriptorImpl(genColumnPositions(td, false)), // referenced columns
sd,
characteristics[2]); // enforced
dd.addConstraintDescriptor(conDesc, tc);
storeConstraintDependenciesOnPrivileges(activation, conDesc, null, providerInfo);
break;
case DataDictionary.FOREIGNKEY_CONSTRAINT:
ReferencedKeyConstraintDescriptor referencedConstraint = DDUtils.locateReferencedConstraint(dd, td, constraintName, columnNames, otherConstraintInfo);
DDUtils.validateReferentialActions(dd, td, constraintName, otherConstraintInfo, columnNames);
conDesc = ddg.newForeignKeyConstraintDescriptor(td, constraintName,
characteristics[0], // deferrable
characteristics[1], // initiallyDeferred
genColumnPositions(td, false), // column positions
constrId, indexId, sd, referencedConstraint,
characteristics[2], // enforced
otherConstraintInfo.getReferentialActionDeleteRule(),
otherConstraintInfo.getReferentialActionUpdateRule());
// Create the constraint first: the bulk referential check is expensive,
// so surface obvious errors before running it.
dd.addConstraintDescriptor(conDesc, tc);
/* No need to do check if we're creating a
* table.
*/
if ((!forCreateTable) && dd.activeConstraint(conDesc)) {
validateFKConstraint(activation, tc, dd, (ForeignKeyConstraintDescriptor) conDesc, referencedConstraint, ((CreateIndexConstantAction) indexAction).getIndexTemplateRow());
}
/* Create stored dependency on the referenced constraint */
dm.addDependency(conDesc, referencedConstraint, lcc.getContextManager());
// store constraint's dependency on REFERENCES privileges in the dependency system
storeConstraintDependenciesOnPrivileges(activation, conDesc, referencedConstraint.getTableId(), providerInfo);
break;
case DataDictionary.MODIFY_CONSTRAINT:
throw StandardException.newException(SQLState.NOT_IMPLEMENTED, "ALTER CONSTRAINT");
default:
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("contraintType (" + constraintType + ") has unexpected value");
}
break;
}
/* Create stored dependencies for each provider */
if (providerInfo != null) {
for (int ix = 0; ix < providerInfo.length; ix++) {
Provider provider = null;
/* We should always be able to find the Provider */
provider = (Provider) providerInfo[ix].getDependableFinder().getDependable(dd, providerInfo[ix].getObjectId());
dm.addDependency(conDesc, provider, lcc.getContextManager());
}
}
/* Finally, invalidate based on the table descriptor(s)
* to ensure that any dependent statements get
* re-compiled.
*/
if (!forCreateTable) {
dm.invalidateFor(td, DependencyManager.CREATE_CONSTRAINT, lcc);
}
if (constraintType == DataDictionary.FOREIGNKEY_CONSTRAINT) {
if (SanityManager.DEBUG) {
SanityManager.ASSERT(conDesc != null, "conDesc expected to be non-null");
if (!(conDesc instanceof ForeignKeyConstraintDescriptor)) {
SanityManager.THROWASSERT("conDesc expected to be instance of ForeignKeyConstraintDescriptor, not " + conDesc.getClass().getName());
}
}
dm.invalidateFor(((ForeignKeyConstraintDescriptor) conDesc).getReferencedConstraint().getTableDescriptor(), DependencyManager.CREATE_CONSTRAINT, lcc);
}
this.constraintId = constrId;
}
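A minimal, hypothetical JDBC sketch of the SQL that this constant action executes; the in-memory connection URL, table names, and constraint name are illustrative. ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY is the statement whose execution-time work (backing index, constraint descriptor, dependency on the referenced key) is performed by CreateConstraintConstantAction.executeConstantAction above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class AddForeignKeyExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo2;create=true");
             Statement s = conn.createStatement()) {
            s.executeUpdate("CREATE TABLE parent(id INT NOT NULL PRIMARY KEY)");
            s.executeUpdate("CREATE TABLE child(id INT, pid INT)");
            // Adds a FOREIGN KEY constraint to an existing table; Derby creates the
            // backing index and the constraint descriptor, and records a dependency
            // on the referenced PRIMARY KEY constraint.
            s.executeUpdate("ALTER TABLE child ADD CONSTRAINT child_fk "
                    + "FOREIGN KEY (pid) REFERENCES parent(id)");
            // The new constraint is visible in the SYSCONSTRAINTS catalog ('F' = foreign key).
            try (ResultSet rs = s.executeQuery(
                    "SELECT constraintname, type FROM sys.sysconstraints")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1) + " " + rs.getString(2));
                }
            }
        }
    }
}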
Use of org.apache.derby.iapi.sql.dictionary.ForeignKeyConstraintDescriptor in the Apache Derby project.
The class AlterTableConstantAction, method truncateTable.
/*
 * TRUNCATE TABLE tablename quickly removes all rows from the table and
 * its corresponding indexes.
 * Truncate is implemented by dropping the existing conglomerates (heap and
 * indexes) and recreating new ones with the properties of the dropped
 * conglomerates. The store currently has no support for truncating an
 * existing conglomerate, so until it does this is the only way to do it.
 * Error cases: truncate has the same error cases as other DDL statements, except:
 * 1) Truncate is not allowed when the table is referenced by another table.
 * 2) Truncate is not allowed when there are enabled DELETE triggers on the table.
 * Note: because the conglomerate numbers change during the recreate process,
 * all dependent statements are marked invalid and are recompiled internally on
 * their next execution. This is fine; since truncate leaves the table with zero
 * rows, the old plans are likely to be wrong anyway. The recompilation is done
 * internally by Derby and requires no action from the user.
 */
private void truncateTable() throws StandardException {
ExecRow emptyHeapRow;
long newHeapConglom;
Properties properties = new Properties();
RowLocation rl;
if (SanityManager.DEBUG) {
if (lockGranularity != '\0') {
SanityManager.THROWASSERT("lockGranularity expected to be '\0', not " + lockGranularity);
}
SanityManager.ASSERT(columnInfo == null, "columnInfo expected to be null");
SanityManager.ASSERT(constraintActions == null, "constraintActions expected to be null");
}
// Truncate is not allowed when the table is referenced by an enabled,
// non-self-referencing foreign key constraint in another table.
for (ConstraintDescriptor cd : dd.getConstraintDescriptors(td)) {
if (cd instanceof ReferencedKeyConstraintDescriptor) {
final ReferencedKeyConstraintDescriptor rfcd = (ReferencedKeyConstraintDescriptor) cd;
for (ConstraintDescriptor fkcd : rfcd.getNonSelfReferencingFK(ConstraintDescriptor.ENABLED)) {
final ForeignKeyConstraintDescriptor fk = (ForeignKeyConstraintDescriptor) fkcd;
throw StandardException.newException(SQLState.LANG_NO_TRUNCATE_ON_FK_REFERENCE_TABLE, td.getName());
}
}
}
// truncate is not allowed when there are enabled DELETE triggers
for (TriggerDescriptor trd : dd.getTriggerDescriptors(td)) {
if (trd.listensForEvent(TriggerDescriptor.TRIGGER_EVENT_DELETE) && trd.isEnabled()) {
throw StandardException.newException(SQLState.LANG_NO_TRUNCATE_ON_ENABLED_DELETE_TRIGGERS, td.getName(), trd.getName());
}
}
// gather information from the existing conglomerate to create new one.
emptyHeapRow = td.getEmptyExecRow();
compressHeapCC = tc.openConglomerate(td.getHeapConglomerateId(), false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
rl = compressHeapCC.newRowLocationTemplate();
// Get the properties on the old heap
compressHeapCC.getInternalTablePropertySet(properties);
compressHeapCC.close();
compressHeapCC = null;
// create new conglomerate
newHeapConglom = tc.createConglomerate("heap", emptyHeapRow.getRowArray(),
null, // column sort order - not required for heap
td.getColumnCollationIds(), properties, TransactionController.IS_DEFAULT);
/* Set up index info to perform truncate on them*/
getAffectedIndexes();
if (numIndexes > 0) {
indexRows = new ExecIndexRow[numIndexes];
ordering = new ColumnOrdering[numIndexes][];
collation = new int[numIndexes][];
for (int index = 0; index < numIndexes; index++) {
IndexRowGenerator curIndex = compressIRGs[index];
// create a single index row template for each index
indexRows[index] = curIndex.getIndexRowTemplate();
curIndex.getIndexRow(emptyHeapRow, rl, indexRows[index], (FormatableBitSet) null);
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* No need to try to enforce uniqueness here as
* index should be valid.
*/
int[] baseColumnPositions = curIndex.baseColumnPositions();
boolean[] isAscending = curIndex.isAscending();
int numColumnOrderings;
numColumnOrderings = baseColumnPositions.length + 1;
ordering[index] = new ColumnOrdering[numColumnOrderings];
collation[index] = curIndex.getColumnCollationIds(td.getColumnDescriptorList());
for (int ii = 0; ii < numColumnOrderings - 1; ii++) {
ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
}
ordering[index][numColumnOrderings - 1] = new IndexColumnOrder(numColumnOrderings - 1);
}
}
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
// truncate all indexes
if (numIndexes > 0) {
long[] newIndexCongloms = new long[numIndexes];
for (int index = 0; index < numIndexes; index++) {
updateIndex(newHeapConglom, dd, index, newIndexCongloms);
}
}
// Update the DataDictionary
// Get the ConglomerateDescriptor for the heap
long oldHeapConglom = td.getHeapConglomerateId();
ConglomerateDescriptor cd = td.getConglomerateDescriptor(oldHeapConglom);
// Update sys.sysconglomerates with new conglomerate #
dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);
// Now that the updated information is available in the system tables,
// we should invalidate all statements that use the old conglomerates
dm.invalidateFor(td, DependencyManager.TRUNCATE_TABLE, lcc);
// Drop the old conglomerate
tc.dropConglomerate(oldHeapConglom);
cleanUp();
}
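A minimal, hypothetical JDBC sketch exercising the error check at the top of truncateTable; the in-memory connection URL and table names are illustrative. TRUNCATE TABLE fails while an enabled foreign key in another table references the target, and succeeds otherwise, at which point Derby recreates the heap and index conglomerates as shown above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class TruncateTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo3;create=true");
             Statement s = conn.createStatement()) {
            s.executeUpdate("CREATE TABLE parent(id INT NOT NULL PRIMARY KEY)");
            s.executeUpdate("CREATE TABLE child(pid INT REFERENCES parent(id))");
            try {
                // Fails: PARENT is referenced by an enabled foreign key in CHILD.
                s.executeUpdate("TRUNCATE TABLE parent");
            } catch (SQLException e) {
                System.out.println("Expected failure: " + e.getMessage());
            }
            // CHILD references PARENT but is not itself referenced, so this succeeds.
            s.executeUpdate("TRUNCATE TABLE child");
        }
    }
}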
Use of org.apache.derby.iapi.sql.dictionary.ForeignKeyConstraintDescriptor in the Apache Derby project.
The class DropConstraintConstantAction, method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for DROP CONSTRAINT.
*
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
ConstraintDescriptor conDesc = null;
TableDescriptor td;
UUID indexId = null;
String indexUUIDString;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
td = dd.getTableDescriptor(tableId);
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tableName);
}
/* Table gets locked in AlterTableConstantAction */
/*
** If the schema descriptor is null, then
** we must have just read ourselves in.
** So we will get the corresponding schema
** descriptor from the data dictionary.
*/
SchemaDescriptor tdSd = td.getSchemaDescriptor();
SchemaDescriptor constraintSd = constraintSchemaName == null ? tdSd : dd.getSchemaDescriptor(constraintSchemaName, tc, true);
/* Get the constraint descriptor for the index, along
* with an exclusive row lock on the row in sys.sysconstraints
* in order to ensure that no one else compiles against the
* index.
*/
// A null constraint name means "ALTER TABLE ... DROP PRIMARY KEY".
if (constraintName == null)
conDesc = dd.getConstraintDescriptors(td).getPrimaryKey();
else
conDesc = dd.getConstraintDescriptorByName(td, constraintSd, constraintName, true);
// Error if constraint doesn't exist
if (conDesc == null) {
String errorName = constraintName == null ? "PRIMARY KEY" : (constraintSd.getSchemaName() + "." + constraintName);
throw StandardException.newException(SQLState.LANG_DROP_OR_ALTER_NON_EXISTING_CONSTRAINT, errorName, td.getQualifiedName());
}
switch(verifyType) {
case DataDictionary.UNIQUE_CONSTRAINT:
if (conDesc.getConstraintType() != verifyType)
throw StandardException.newException(SQLState.LANG_DROP_CONSTRAINT_TYPE, constraintName, "UNIQUE");
break;
case DataDictionary.CHECK_CONSTRAINT:
if (conDesc.getConstraintType() != verifyType)
throw StandardException.newException(SQLState.LANG_DROP_CONSTRAINT_TYPE, constraintName, "CHECK");
break;
case DataDictionary.FOREIGNKEY_CONSTRAINT:
if (conDesc.getConstraintType() != verifyType)
throw StandardException.newException(SQLState.LANG_DROP_CONSTRAINT_TYPE, constraintName, "FOREIGN KEY");
break;
}
boolean cascadeOnRefKey = (cascade && conDesc instanceof ReferencedKeyConstraintDescriptor);
if (!cascadeOnRefKey) {
dm.invalidateFor(conDesc, DependencyManager.DROP_CONSTRAINT, lcc);
}
/*
** If we had a primary/unique key and it is drop cascade,
** drop all the referencing keys now. We MUST do this AFTER
** dropping the referenced key because otherwise we would
** be repeatedly changing the reference count of the referenced
** key and generating unnecessary I/O.
*/
dropConstraint(conDesc, activation, lcc, !cascadeOnRefKey);
if (cascadeOnRefKey) {
ForeignKeyConstraintDescriptor fkcd;
ReferencedKeyConstraintDescriptor cd;
ConstraintDescriptorList cdl;
cd = (ReferencedKeyConstraintDescriptor) conDesc;
cdl = cd.getForeignKeyConstraints(ReferencedKeyConstraintDescriptor.ALL);
int cdlSize = cdl.size();
for (int index = 0; index < cdlSize; index++) {
fkcd = (ForeignKeyConstraintDescriptor) cdl.elementAt(index);
dm.invalidateFor(fkcd, DependencyManager.DROP_CONSTRAINT, lcc);
dropConstraint(fkcd, activation, lcc, true);
}
/*
** We told dropConstraintAndIndex not to
** remove our dependencies, so send an invalidate,
** and drop the dependencies.
*/
dm.invalidateFor(conDesc, DependencyManager.DROP_CONSTRAINT, lcc);
dm.clearDependencies(lcc, conDesc);
}
}
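A minimal, hypothetical JDBC sketch of the SQL statements executed by DropConstraintConstantAction.executeConstantAction above; the in-memory connection URL, table name, and constraint names are illustrative. Both the named-constraint form and the DROP PRIMARY KEY form (the null-constraint-name case noted in the code) are shown.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropConstraintExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo4;create=true");
             Statement s = conn.createStatement()) {
            s.executeUpdate("CREATE TABLE t(id INT NOT NULL, "
                    + "CONSTRAINT t_pk PRIMARY KEY (id), "
                    + "CONSTRAINT t_chk CHECK (id > 0))");
            // Drops the named CHECK constraint; dependent statements are invalidated
            // through the dependency manager as in the code above.
            s.executeUpdate("ALTER TABLE t DROP CONSTRAINT t_chk");
            // This form reaches the constant action with a null constraint name.
            s.executeUpdate("ALTER TABLE t DROP PRIMARY KEY");
        }
    }
}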
Use of org.apache.derby.iapi.sql.dictionary.ForeignKeyConstraintDescriptor in the Apache Derby project.
The class SYSFOREIGNKEYSRowFactory, method makeRow.
// ///////////////////////////////////////////////////////////////////////////
//
// METHODS
//
// ///////////////////////////////////////////////////////////////////////////
/**
* Make a SYSFOREIGNKEYS row
*
* @return Row suitable for inserting into SYSFOREIGNKEYS.
*
* @exception StandardException thrown on failure
*/
public ExecRow makeRow(TupleDescriptor td, TupleDescriptor parent) throws StandardException {
DataValueDescriptor col;
ExecIndexRow row;
String constraintId = null;
String keyConstraintId = null;
String conglomId = null;
String raDeleteRule = "N";
String raUpdateRule = "N";
if (td != null) {
ForeignKeyConstraintDescriptor cd = (ForeignKeyConstraintDescriptor) td;
constraintId = cd.getUUID().toString();
ReferencedKeyConstraintDescriptor refCd = cd.getReferencedConstraint();
if (SanityManager.DEBUG) {
SanityManager.ASSERT(refCd != null, "this fk returned a null referenced key");
}
keyConstraintId = refCd.getUUID().toString();
conglomId = cd.getIndexUUIDString();
raDeleteRule = getRefActionAsString(cd.getRaDeleteRule());
raUpdateRule = getRefActionAsString(cd.getRaUpdateRule());
}
/* Build the row */
row = getExecutionFactory().getIndexableRow(SYSFOREIGNKEYS_COLUMN_COUNT);
/* 1st column is CONSTRAINTID (UUID - char(36)) */
row.setColumn(SYSFOREIGNKEYS_CONSTRAINTID, new SQLChar(constraintId));
/* 2nd column is CONGLOMERATEID (UUID - char(36)) */
row.setColumn(SYSFOREIGNKEYS_CONGLOMERATEID, new SQLChar(conglomId));
/* 3rd column is KEYCONSTRAINTID (UUID - char(36)) */
row.setColumn(SYSFOREIGNKEYS_KEYCONSTRAINTID, new SQLChar(keyConstraintId));
// DELETERULE and UPDATERULE are single-character codes derived from the
// constraint's referential actions by getRefActionAsString()
/* 4th column is DELETERULE char(1) */
row.setColumn(SYSFOREIGNKEYS_DELETERULE, new SQLChar(raDeleteRule));
/* 5th column is UPDATERULE char(1) */
row.setColumn(SYSFOREIGNKEYS_UPDATERULE, new SQLChar(raUpdateRule));
return row;
}
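A minimal, hypothetical JDBC sketch that inspects the SYSFOREIGNKEYS row built by makeRow above; the in-memory connection URL, table names, and constraint name are illustrative. The selected columns (CONSTRAINTID, KEYCONSTRAINTID, DELETERULE, UPDATERULE) correspond to the columns the row factory populates.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SysForeignKeysExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo5;create=true");
             Statement s = conn.createStatement()) {
            s.executeUpdate("CREATE TABLE parent(id INT NOT NULL PRIMARY KEY)");
            s.executeUpdate("CREATE TABLE child(pid INT, CONSTRAINT child_fk FOREIGN KEY (pid) "
                    + "REFERENCES parent(id) ON DELETE CASCADE)");
            // Each foreign key constraint has one row in SYS.SYSFOREIGNKEYS linking it
            // to the referenced key constraint and recording the referential actions.
            try (ResultSet rs = s.executeQuery(
                    "SELECT constraintid, keyconstraintid, deleterule, updaterule "
                    + "FROM sys.sysforeignkeys")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1) + " -> " + rs.getString(2)
                            + " delete=" + rs.getString(3) + " update=" + rs.getString(4));
                }
            }
        }
    }
}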