use of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator in project derby by apache.
the class CreateAliasConstantAction method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for
* CREATE FUNCTION, PROCEDURE, SYNONYM, and TYPE.
* <P>
* A function, procedure, or UDT is represented as:
* <UL>
* <LI> AliasDescriptor
* </UL>
* Routine dependencies are created as:
* <UL>
* <LI> None
* </UL>
*
* <P>
* A synonym is represented as:
* <UL>
* <LI> AliasDescriptor
* <LI> TableDescriptor
* </UL>
* Synonym dependencies are created as:
* <UL>
* <LI> None
* </UL>
*
* In both cases a SchemaDescriptor will be created if
* needed. No dependency is created on the SchemaDescriptor.
*
* @see ConstantAction#executeConstantAction
* @see AliasDescriptor
* @see TableDescriptor
* @see SchemaDescriptor
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
TransactionController tc = lcc.getTransactionExecute();
// For routines, no validity checking is done on the Java method;
// that is checked when the routine is executed.
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
SchemaDescriptor sd = DDLConstantAction.getSchemaDescriptorForCreate(dd, activation, schemaName);
//
// Create a new alias descriptor with aliasID filled in.
//
UUID aliasID = dd.getUUIDFactory().createUUID();
AliasDescriptor ads = new AliasDescriptor(dd, aliasID, aliasName, sd.getUUID(), javaClassName, aliasType, nameSpace, false, aliasInfo, null);
// perform duplicate rule checking
switch(aliasType) {
case AliasInfo.ALIAS_TYPE_AGGREGATE_AS_CHAR:
AliasDescriptor duplicateAlias = dd.getAliasDescriptor(sd.getUUID().toString(), aliasName, nameSpace);
if (duplicateAlias != null) {
throw StandardException.newException(SQLState.LANG_OBJECT_ALREADY_EXISTS, ads.getDescriptorType(), aliasName);
}
// also don't want to collide with 1-arg functions by the same name
List<AliasDescriptor> funcList = dd.getRoutineList(sd.getUUID().toString(), aliasName, AliasInfo.ALIAS_TYPE_FUNCTION_AS_CHAR);
for (int i = 0; i < funcList.size(); i++) {
AliasDescriptor func = funcList.get(i);
RoutineAliasInfo funcInfo = (RoutineAliasInfo) func.getAliasInfo();
if (funcInfo.getParameterCount() == 1) {
throw StandardException.newException(SQLState.LANG_BAD_UDA_OR_FUNCTION_NAME, schemaName, aliasName);
}
}
break;
case AliasInfo.ALIAS_TYPE_UDT_AS_CHAR:
AliasDescriptor duplicateUDT = dd.getAliasDescriptor(sd.getUUID().toString(), aliasName, nameSpace);
if (duplicateUDT != null) {
throw StandardException.newException(SQLState.LANG_OBJECT_ALREADY_EXISTS, ads.getDescriptorType(), aliasName);
}
break;
case AliasInfo.ALIAS_TYPE_PROCEDURE_AS_CHAR:
vetRoutine(dd, sd, ads);
break;
case AliasInfo.ALIAS_TYPE_FUNCTION_AS_CHAR:
vetRoutine(dd, sd, ads);
// if this is a 1-arg function, make sure there isn't an aggregate
// by the same qualified name
int paramCount = ((RoutineAliasInfo) aliasInfo).getParameterCount();
if (paramCount == 1) {
AliasDescriptor aliasCollision = dd.getAliasDescriptor(sd.getUUID().toString(), aliasName, AliasInfo.ALIAS_NAME_SPACE_AGGREGATE_AS_CHAR);
if (aliasCollision != null) {
throw StandardException.newException(SQLState.LANG_BAD_UDA_OR_FUNCTION_NAME, schemaName, aliasName);
}
}
break;
case AliasInfo.ALIAS_TYPE_SYNONYM_AS_CHAR:
// If target table/view exists already, error.
TableDescriptor targetTD = dd.getTableDescriptor(aliasName, sd, tc);
if (targetTD != null) {
throw StandardException.newException(SQLState.LANG_OBJECT_ALREADY_EXISTS, targetTD.getDescriptorType(), targetTD.getDescriptorName());
}
// Detect synonym cycles, if present.
String nextSynTable = ((SynonymAliasInfo) aliasInfo).getSynonymTable();
String nextSynSchema = ((SynonymAliasInfo) aliasInfo).getSynonymSchema();
SchemaDescriptor nextSD;
for (; ; ) {
nextSD = dd.getSchemaDescriptor(nextSynSchema, tc, false);
if (nextSD == null)
break;
AliasDescriptor nextAD = dd.getAliasDescriptor(nextSD.getUUID().toString(), nextSynTable, nameSpace);
if (nextAD == null)
break;
SynonymAliasInfo info = (SynonymAliasInfo) nextAD.getAliasInfo();
nextSynTable = info.getSynonymTable();
nextSynSchema = info.getSynonymSchema();
if (aliasName.equals(nextSynTable) && schemaName.equals(nextSynSchema))
throw StandardException.newException(SQLState.LANG_SYNONYM_CIRCULAR, aliasName, ((SynonymAliasInfo) aliasInfo).getSynonymTable());
}
// If the synonym's final target is not present, raise a warning
if (nextSD != null)
targetTD = dd.getTableDescriptor(nextSynTable, nextSD, tc);
if (nextSD == null || targetTD == null)
activation.addWarning(StandardException.newWarning(SQLState.LANG_SYNONYM_UNDEFINED, aliasName, nextSynSchema + "." + nextSynTable));
// To prevent any possible deadlocks with SYSTABLES, we insert a row into
// SYSTABLES for synonyms as well. This also ensures that tables, views,
// and synonyms share the same namespace.
TableDescriptor td;
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
td = ddg.newTableDescriptor(aliasName, sd, TableDescriptor.SYNONYM_TYPE, TableDescriptor.DEFAULT_LOCK_GRANULARITY);
dd.addDescriptor(td, sd, DataDictionary.SYSTABLES_CATALOG_NUM, false, tc);
break;
default:
break;
}
dd.addDescriptor(ads, null, DataDictionary.SYSALIASES_CATALOG_NUM, false, tc);
adjustUDTDependencies(lcc, dd, ads, true);
}
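For orientation, here is a minimal sketch of client SQL whose execution funnels into this constant action. It is illustrative only: the in-memory JDBC URL and the class name are assumptions, and it presumes the embedded Derby driver is on the classpath. The second synonym deliberately closes a cycle so the for(;;) walk above throws LANG_SYNONYM_CIRCULAR.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class CreateAliasDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:derby:memory:aliasDemo;create=true");
             Statement s = conn.createStatement()) {
            // CREATE FUNCTION: stored as a single AliasDescriptor row.
            s.execute("CREATE FUNCTION ABS_VAL(X INT) RETURNS INT"
                    + " LANGUAGE JAVA PARAMETER STYLE JAVA NO SQL"
                    + " EXTERNAL NAME 'java.lang.Math.abs'");
            // CREATE SYNONYM: an AliasDescriptor plus a SYNONYM_TYPE row in
            // SYSTABLES. A is created with a LANG_SYNONYM_UNDEFINED warning
            // because its target B does not exist yet.
            s.execute("CREATE SYNONYM A FOR B");
            // B FOR A completes a cycle: the loop above follows A -> B -> A
            // and raises LANG_SYNONYM_CIRCULAR.
            try {
                s.execute("CREATE SYNONYM B FOR A");
            } catch (SQLException e) {
                System.out.println("circular synonym: " + e.getSQLState());
            }
        }
    }
}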
use of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator in project derby by apache.
the class CreateIndexConstantAction method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for
* creating an index.
*
* <P>
* An index is represented as:
* <UL>
* <LI> ConglomerateDescriptor.
* </UL>
* No dependencies are created.
*
* @see ConglomerateDescriptor
* @see SchemaDescriptor
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
UUID toid;
ColumnDescriptor columnDescriptor;
int[] baseColumnPositions;
IndexRowGenerator indexRowGenerator = null;
ExecRow[] baseRows;
ExecIndexRow[] indexRows;
ExecRow[] compactBaseRows;
GroupFetchScanController scan;
RowLocationRetRowSource rowSource;
long sortId;
int maxBaseColumnPosition = -1;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
/*
** If the schema descriptor is null, then
** we must have just read ourselves in.
** So we will get the corresponding schema
** descriptor from the data dictionary.
*/
SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc, true);
/* Get the table descriptor. */
/* See if we can get the TableDescriptor
* from the Activation. (Will be there
* for backing indexes.)
*/
td = activation.getDDLTableDescriptor();
if (td == null) {
/* tableId will be non-null if adding an index to
* an existing table (as opposed to creating a
* table with a constraint with a backing index).
*/
if (tableId != null) {
td = dd.getTableDescriptor(tableId);
} else {
td = dd.getTableDescriptor(tableName, sd, tc);
}
}
if (td == null) {
throw StandardException.newException(SQLState.LANG_CREATE_INDEX_NO_TABLE, indexName, tableName);
}
if (td.getTableType() == TableDescriptor.SYSTEM_TABLE_TYPE) {
throw StandardException.newException(SQLState.LANG_CREATE_SYSTEM_INDEX_ATTEMPTED, indexName, tableName);
}
/* Get a shared table lock on the table. We need to lock table before
* invalidate dependents, otherwise, we may interfere with the
* compilation/re-compilation of DML/DDL. See beetle 4325 and $WS/
* docs/language/SolutionsToConcurrencyIssues.txt (point f).
*/
lockTableForDDL(tc, td.getHeapConglomerateId(), false);
// Invalidate any prepared statements that depended on this table (including this one)
if (!forCreateTable) {
dm.invalidateFor(td, DependencyManager.CREATE_INDEX, lcc);
}
// Translate the base column names to column positions
baseColumnPositions = new int[columnNames.length];
for (int i = 0; i < columnNames.length; i++) {
// Look up the column in the data dictionary
columnDescriptor = td.getColumnDescriptor(columnNames[i]);
if (columnDescriptor == null) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, columnNames[i], tableName);
}
TypeId typeId = columnDescriptor.getType().getTypeId();
// Don't allow an index column on a non-orderable type
ClassFactory cf = lcc.getLanguageConnectionFactory().getClassFactory();
boolean isIndexable = typeId.orderable(cf);
if (isIndexable && typeId.userType()) {
String userClass = typeId.getCorrespondingJavaTypeName();
// Don't allow indexes on classes loaded from the database,
// because recovery will not be able to see the class when it
// needs to run the compare method.
try {
if (cf.isApplicationClass(cf.loadApplicationClass(userClass)))
isIndexable = false;
} catch (ClassNotFoundException cnfe) {
// shouldn't happen as we just check the class is orderable
isIndexable = false;
}
}
if (!isIndexable) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_ORDERABLE_DURING_EXECUTION, typeId.getSQLTypeName());
}
// Remember the position in the base table of each column
baseColumnPositions[i] = columnDescriptor.getPosition();
if (maxBaseColumnPosition < baseColumnPositions[i])
maxBaseColumnPosition = baseColumnPositions[i];
}
/* The code below tries to determine if the index that we're about
* to create can "share" a conglomerate with an existing index.
* If so, we will use a single physical conglomerate--namely, the
* one that already exists--to support both indexes. I.e. we will
* *not* create a new conglomerate as part of this constant action.
*
* Deferrable constraints are backed by indexes that are *not* shared
* since they use physically non-unique indexes and as such are
* different from indexes used to represent non-deferrable
* constraints.
*/
// check if we have similar indices already for this table
ConglomerateDescriptor[] congDescs = td.getConglomerateDescriptors();
boolean shareExisting = false;
for (int i = 0; i < congDescs.length; i++) {
ConglomerateDescriptor cd = congDescs[i];
if (!cd.isIndex())
continue;
if (droppedConglomNum == cd.getConglomerateNumber()) {
/* We can't share with any conglomerate descriptor
* whose conglomerate number matches the dropped
* conglomerate number, because that descriptor's
* backing conglomerate was dropped, as well. If
* we're going to share, we have to share with a
* descriptor whose backing physical conglomerate
* is still around.
*/
continue;
}
IndexRowGenerator irg = cd.getIndexDescriptor();
int[] bcps = irg.baseColumnPositions();
boolean[] ia = irg.isAscending();
int j = 0;
/* The conditions which allow an index to share an existing
* conglomerate are as follows:
*
* 1. the set of columns (both key and include columns) and their
* order in the index is the same as that of an existing index AND
*
* 2. the ordering attributes are the same AND
*
* 3. one of the following is true:
* a) the existing index is unique, OR
* b) the existing index is non-unique with uniqueWithDuplicateNulls
* set to TRUE and the index being created is non-unique, OR
* c) both the existing index and the one being created are
* non-unique and have uniqueWithDuplicateNulls set to FALSE.
*
* 4. hasDeferrableChecking is FALSE.
*/
boolean possibleShare = (irg.isUnique() || !unique) && (bcps.length == baseColumnPositions.length) && !hasDeferrableChecking;
// Check whether the existing index is non-unique and uniqueWithDuplicateNulls
// is set to true (backing index for unique constraint)
if (possibleShare && !irg.isUnique()) {
/* If the existing index has uniqueWithDuplicateNulls set to
* TRUE it can be shared by other non-unique indexes; otherwise
* the existing non-unique index has uniqueWithDuplicateNulls
* set to FALSE, which means the new non-unique conglomerate
* can only share if it has uniqueWithDuplicateNulls set to
* FALSE, as well.
*/
possibleShare = (irg.isUniqueWithDuplicateNulls() || !uniqueWithDuplicateNulls);
}
if (possibleShare && indexType.equals(irg.indexType())) {
for (; j < bcps.length; j++) {
if ((bcps[j] != baseColumnPositions[j]) || (ia[j] != isAscending[j]))
break;
}
}
if (j == baseColumnPositions.length) { // share
/*
* Don't allow users to create a duplicate index. Allow if being done internally
* for a constraint
*/
if (!isConstraint) {
activation.addWarning(StandardException.newWarning(SQLState.LANG_INDEX_DUPLICATE, indexName, cd.getConglomerateName()));
return;
}
/* Sharing indexes share the physical conglomerate
* underneath, so pull the conglomerate number from
* the existing conglomerate descriptor.
*/
conglomId = cd.getConglomerateNumber();
/* We create a new IndexRowGenerator because certain
* attributes--esp. uniqueness--may be different between
* the index we're creating and the conglomerate that
* already exists. I.e. even though we're sharing a
* conglomerate, the new index is not necessarily
* identical to the existing conglomerate. We have to
* keep track of that info so that if we later drop
* the shared physical conglomerate, we can figure out
* what this index (the one we're creating now) is
* really supposed to look like.
*/
indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls,
        false, // uniqueDeferrable
        false, // hasDeferrableChecking: deferrable indexes are not shared
        baseColumnPositions, isAscending, baseColumnPositions.length);
// DERBY-655 and DERBY-1343
// Sharing indexes will have unique logical conglomerate UUIDs.
conglomerateUUID = dd.getUUIDFactory().createUUID();
shareExisting = true;
break;
}
}
/* If we have a droppedConglomNum then the index we're about to
* "create" already exists--i.e. it has an index descriptor and
* the corresponding information is already in the system catalogs.
* The only thing we're missing, then, is the physical conglomerate
* to back the index (because the old conglomerate was dropped).
*/
boolean alreadyHaveConglomDescriptor = (droppedConglomNum > -1L);
/* If an essentially identical index already exists, we share the
 * conglomerate with the old one and simply add a descriptor entry
 * into SYSCONGLOMERATES--unless we already have a descriptor, in
 * which case we don't even need to do that.
 */
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
if (shareExisting && !alreadyHaveConglomDescriptor) {
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
// can't just return yet, need to get member "indexTemplateRow"
// because create constraint may use it
}
// Describe the properties of the index to the store using Properties
// RESOLVE: The following properties assume a BTREE index.
Properties indexProperties;
if (properties != null) {
indexProperties = properties;
} else {
indexProperties = new Properties();
}
// Tell it the conglomerate id of the base table
indexProperties.put("baseConglomerateId", Long.toString(td.getHeapConglomerateId()));
if (uniqueWithDuplicateNulls && !hasDeferrableChecking) {
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
indexProperties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
} else {
// Older data dictionaries have no unique-with-duplicate-nulls
// support; simulate it by creating a unique index instead.
if (uniqueWithDuplicateNulls) {
unique = true;
}
}
}
// All indexes are unique because they contain the RowLocation.
// The number of uniqueness columns must include the RowLocation
// if the user did not specify a unique index.
indexProperties.put("nUniqueColumns", Integer.toString(unique ? baseColumnPositions.length : baseColumnPositions.length + 1));
// By convention, the row location column is the last column
indexProperties.put("rowLocationColumn", Integer.toString(baseColumnPositions.length));
// For now, all columns are key fields, including the RowLocation
indexProperties.put("nKeyFields", Integer.toString(baseColumnPositions.length + 1));
// For now, assume that all index columns are ordered columns
if (!shareExisting) {
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls, uniqueDeferrable, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), baseColumnPositions, isAscending, baseColumnPositions.length);
} else {
indexRowGenerator = new IndexRowGenerator(indexType, unique, false, false, false, baseColumnPositions, isAscending, baseColumnPositions.length);
}
}
/* Now add the rows from the base table to the conglomerate.
* We do this by scanning the base table and inserting the
* rows into a sorter before inserting from the sorter
* into the index. This gives us better performance
* and a more compact index.
*/
rowSource = null;
sortId = 0;
// set to true once the sorter is created
boolean needToDropSort = false;
/* bulkFetchSize will be 16 (for now) unless
* we are creating the table in which case it
* will be 1. Too hard to remove scan when
* creating index on new table, so minimize
* work where we can.
*/
int bulkFetchSize = (forCreateTable) ? 1 : 16;
int numColumns = td.getNumberOfColumns();
int approximateRowSize = 0;
// Create the FormatableBitSet for mapping the partial to full base row
FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
for (int index = 0; index < baseColumnPositions.length; index++) {
bitSet.set(baseColumnPositions[index]);
}
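// Column positions from the data dictionary are one-based; shift the
// bit set to derive the zero-based column map the heap scan expects.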
FormatableBitSet zeroBasedBitSet = RowUtil.shift(bitSet, 1);
// Start by opening a full scan on the base table.
scan = tc.openGroupFetchScan(td.getHeapConglomerateId(),
        false, // hold
        0, // open base table read only
        TransactionController.MODE_TABLE,
        TransactionController.ISOLATION_SERIALIZABLE,
        zeroBasedBitSet, // all fields as objects
        (DataValueDescriptor[]) null, // startKeyValue
        0, // not used when giving null start posn.
        null, // qualifier
        (DataValueDescriptor[]) null, // stopKeyValue
        0); // not used when giving null stop posn.
// Create an array to put base row template
baseRows = new ExecRow[bulkFetchSize];
indexRows = new ExecIndexRow[bulkFetchSize];
compactBaseRows = new ExecRow[bulkFetchSize];
try {
// Create the array of base row template
for (int i = 0; i < bulkFetchSize; i++) {
// create a base row template
baseRows[i] = activation.getExecutionFactory().getValueRow(maxBaseColumnPosition);
// create an index row template
indexRows[i] = indexRowGenerator.getIndexRowTemplate();
// create a compact base row template
compactBaseRows[i] = activation.getExecutionFactory().getValueRow(baseColumnPositions.length);
}
indexTemplateRow = indexRows[0];
// Fill the partial row with nulls of the correct type
ColumnDescriptorList cdl = td.getColumnDescriptorList();
int cdlSize = cdl.size();
for (int index = 0, numSet = 0; index < cdlSize; index++) {
if (!zeroBasedBitSet.get(index)) {
continue;
}
numSet++;
ColumnDescriptor cd = cdl.elementAt(index);
DataTypeDescriptor dts = cd.getType();
for (int i = 0; i < bulkFetchSize; i++) {
// Put the column in both the compact and sparse base rows
baseRows[i].setColumn(index + 1, dts.getNull());
compactBaseRows[i].setColumn(numSet, baseRows[i].getColumn(index + 1));
}
// Calculate the approximate row size for the index row
approximateRowSize += dts.getTypeId().getApproximateLengthInBytes(dts);
}
// Get an array of RowLocation template
RowLocation[] rl = new RowLocation[bulkFetchSize];
for (int i = 0; i < bulkFetchSize; i++) {
rl[i] = scan.newRowLocationTemplate();
// Get an index row based on the base row
indexRowGenerator.getIndexRow(compactBaseRows[i], rl[i], indexRows[i], bitSet);
}
/* Now that we have indexTemplateRow, we are done in the
 * index-sharing case.
 */
if (shareExisting)
return;
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate keys.
* We create a basic sort observer for non-unique indexes
* so that we can reuse the wrappers during an external
* sort.
*/
int numColumnOrderings;
SortObserver sortObserver;
Properties sortProperties = null;
if (unique || uniqueWithDuplicateNulls || uniqueDeferrable) {
// if the index is a constraint, use constraintname in
// possible error message
String indexOrConstraintName = indexName;
if (conglomerateUUID != null) {
ConglomerateDescriptor cd = dd.getConglomerateDescriptor(conglomerateUUID);
if ((isConstraint) && (cd != null && cd.getUUID() != null && td != null)) {
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
}
}
if (unique || uniqueDeferrable) {
numColumnOrderings = unique ? baseColumnPositions.length : baseColumnPositions.length + 1;
sortObserver = new UniqueIndexSortObserver(lcc, constraintID, true, uniqueDeferrable, initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
} else {
// unique with duplicate nulls allowed.
numColumnOrderings = baseColumnPositions.length + 1;
// tell transaction controller to use the unique with
// duplicate nulls sorter, when making createSort() call.
sortProperties = new Properties();
sortProperties.put(AccessFactoryGlobals.IMPL_TYPE, AccessFactoryGlobals.SORT_UNIQUEWITHDUPLICATENULLS_EXTERNAL);
// use sort operator which treats nulls unequal
sortObserver = new UniqueWithDuplicateNullsIndexSortObserver(lcc, constraintID, true, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
}
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(true, false, indexTemplateRow, true);
}
ColumnOrdering[] order = new ColumnOrdering[numColumnOrderings];
for (int i = 0; i < numColumnOrderings; i++) {
order[i] = new IndexColumnOrder(i, unique || i < numColumnOrderings - 1 ? isAscending[i] : true);
}
// create the sorter
sortId = tc.createSort(sortProperties,
        indexTemplateRow.getRowArrayClone(),
        order,
        sortObserver,
        false, // not in order
        scan.getEstimatedRowCount(),
        approximateRowSize); // est row size, -1 means no idea
needToDropSort = true;
// Populate sorter and get the output of the sorter into a row
// source. The sorter has the indexed columns only and the columns
// are in the correct order.
rowSource = loadSorter(baseRows, indexRows, tc, scan, sortId, rl);
conglomId = tc.createAndLoadConglomerate(indexType,
        indexTemplateRow.getRowArray(), // index row template
        order, // columns sort order
        indexRowGenerator.getColumnCollationIds(td.getColumnDescriptorList()),
        indexProperties,
        TransactionController.IS_DEFAULT, // not temporary
        rowSource,
        (long[]) null);
} finally {
/* close the table scan */
if (scan != null)
scan.close();
/* close the sorter row source before throwing exception */
if (rowSource != null)
rowSource.closeRowSource();
/*
** drop the sort so that intermediate external sort run can be
** removed from disk
*/
if (needToDropSort)
tc.dropSort(sortId);
}
ConglomerateController indexController = tc.openConglomerate(conglomId, false, 0, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
// Check to make sure that the conglomerate can be used as an index
if (!indexController.isKeyed()) {
indexController.close();
throw StandardException.newException(SQLState.LANG_NON_KEYED_INDEX, indexName, indexType);
}
indexController.close();
// Create a conglomerate descriptor with the conglomId filled in and
// add it--if we don't already have one.
if (!alreadyHaveConglomDescriptor) {
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
/* Since we created a new conglomerate descriptor, load
* its UUID into the corresponding field, to ensure that
* it is properly set in the StatisticsDescriptor created
* below.
*/
conglomerateUUID = cgd.getUUID();
}
CardinalityCounter cCount = (CardinalityCounter) rowSource;
long numRows = cCount.getRowCount();
if (addStatistics(dd, indexRowGenerator, numRows)) {
long[] c = cCount.getCardinality();
for (int i = 0; i < c.length; i++) {
StatisticsDescriptor statDesc = new StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(), conglomerateUUID, td.getUUID(), "I", new StatisticsImpl(numRows, c[i]), i + 1);
dd.addDescriptor(statDesc, null, DataDictionary.SYSSTATISTICS_CATALOG_NUM, true, tc);
}
}
}
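A minimal sketch of client code exercising the paths above, including the conglomerate-sharing check for a duplicate user index. The in-memory URL and class name are illustrative assumptions, not part of the Derby source shown here.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.Statement;

public class CreateIndexDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:derby:memory:indexDemo;create=true");
             Statement s = conn.createStatement()) {
            s.execute("CREATE TABLE T(I INT, J INT)");
            s.execute("INSERT INTO T VALUES (1, 2), (3, 4)");
            // Scans the heap through the sorter, builds the BTREE, and
            // records a ConglomerateDescriptor plus index statistics.
            s.execute("CREATE INDEX T_IDX ON T(I, J)");
            // Same columns and ordering: the constant action finds the share
            // candidate and, since this is a user index rather than a
            // constraint's backing index, returns with only a warning.
            s.execute("CREATE INDEX T_IDX2 ON T(I, J)");
            for (SQLWarning w = s.getWarnings(); w != null; w = w.getNextWarning()) {
                System.out.println(w.getSQLState() + ": " + w.getMessage());
            }
        }
    }
}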
use of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator in project derby by apache.
the class CreateRoleConstantAction method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for CREATE ROLE.
*
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
TransactionController tc = lcc.getTransactionExecute();
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
if (roleName.equals(Authorizer.PUBLIC_AUTHORIZATION_ID)) {
throw StandardException.newException(SQLState.AUTH_PUBLIC_ILLEGAL_AUTHORIZATION_ID);
}
// currentAuthId is currently always the database owner since
// role definition is a database owner power. This may change
// in the future since this SQL is more liberal.
//
final String currentAuthId = lcc.getCurrentUserId(activation);
dd.startWriting(lcc);
//
// Check if this role already exists. If it does, throw.
//
RoleGrantDescriptor rdDef = dd.getRoleDefinitionDescriptor(roleName);
if (rdDef != null) {
throw StandardException.newException(SQLState.LANG_OBJECT_ALREADY_EXISTS, rdDef.getDescriptorType(), roleName);
}
// Check whether the proposed role name collides with a known user id
// ("best effort": we cannot guarantee against collision if users are
// externally defined or added later).
if (knownUser(roleName, currentAuthId, lcc, dd, tc)) {
throw StandardException.newException(SQLState.LANG_OBJECT_ALREADY_EXISTS, "User", roleName);
}
rdDef = ddg.newRoleGrantDescriptor(dd.getUUIDFactory().createUUID(),
        roleName,
        currentAuthId, // grantee
        Authorizer.SYSTEM_AUTHORIZATION_ID, // grantor
        true, // with admin option
        true); // is definition
dd.addDescriptor(rdDef,
        null, // parent
        DataDictionary.SYSROLES_CATALOG_NUM,
        false, // duplicatesAllowed
        tc);
}
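A small sketch of the corresponding client SQL. It assumes a database created with derby.database.sqlAuthorization enabled and a connection as the database owner (role definition is a database owner power); the URL, credentials, and class name are illustrative.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class CreateRoleDemo {
    public static void main(String[] args) throws SQLException {
        // Assumes SQL authorization is enabled and we connect as the
        // database owner; otherwise CREATE ROLE is rejected before this
        // constant action ever runs.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:derby:memory:roleDemo;create=true", "dbo", "dbopwd");
             Statement s = conn.createStatement()) {
            s.execute("CREATE ROLE reader"); // definition row in SYSROLES
            try {
                s.execute("CREATE ROLE reader"); // duplicate: object already exists
            } catch (SQLException e) {
                System.out.println("duplicate role: " + e.getSQLState());
            }
        }
    }
}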
use of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator in project derby by apache.
the class CreateSchemaConstantAction method executeConstantActionMinion.
private void executeConstantActionMinion(Activation activation, TransactionController tc) throws StandardException {
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, lcc.getTransactionExecute(), false);
// This is to handle in-memory SESSION schema for temp tables
if ((sd != null) && (sd.getUUID() != null)) {
throw StandardException.newException(SQLState.LANG_OBJECT_ALREADY_EXISTS, "Schema", schemaName);
}
UUID tmpSchemaId = dd.getUUIDFactory().createUUID();
/*
** AID defaults to connection authorization if not
** specified in CREATE SCHEMA (if we had module
** authorizations, that would be the first check
** for default, then session aid).
*/
String thisAid = aid;
if (thisAid == null) {
thisAid = lcc.getCurrentUserId(activation);
}
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
sd = ddg.newSchemaDescriptor(schemaName, thisAid, tmpSchemaId);
dd.addDescriptor(sd, null, DataDictionary.SYSSCHEMAS_CATALOG_NUM, false, tc);
}
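A minimal sketch of the client-side view of this action (in-memory URL and class name are illustrative assumptions):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class CreateSchemaDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:derby:memory:schemaDemo;create=true");
             Statement s = conn.createStatement()) {
            // No AUTHORIZATION clause, so aid is null and the descriptor is
            // created with the current user id as its authorization id.
            s.execute("CREATE SCHEMA HR");
            try {
                s.execute("CREATE SCHEMA HR"); // schema already exists
            } catch (SQLException e) {
                System.out.println("duplicate schema: " + e.getSQLState());
            }
        }
    }
}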
use of org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator in project derby by apache.
the class SYSCONSTRAINTSRowFactory method buildDescriptor.
// /////////////////////////////////////////////////////////////////////////
//
// ABSTRACT METHODS TO BE IMPLEMENTED BY CHILDREN OF CatalogRowFactory
//
// /////////////////////////////////////////////////////////////////////////
/**
* Make a ConstraintDescriptor out of a SYSCONSTRAINTS row
*
* @param row a SYSCONSTRAINTS row
* @param parentTupleDescriptor Subconstraint descriptor with auxiliary info.
* @param dd dataDictionary
*
* @exception StandardException thrown on failure
*/
public TupleDescriptor buildDescriptor(ExecRow row, TupleDescriptor parentTupleDescriptor, DataDictionary dd) throws StandardException {
ConstraintDescriptor constraintDesc = null;
if (SanityManager.DEBUG) {
SanityManager.ASSERT(row.nColumns() == SYSCONSTRAINTS_COLUMN_COUNT, "Wrong number of columns for a SYSCONSTRAINTS row");
}
DataValueDescriptor col;
ConglomerateDescriptor conglomDesc;
DataDescriptorGenerator ddg;
TableDescriptor td = null;
int constraintIType = -1;
int[] keyColumns = null;
UUID constraintUUID;
UUID schemaUUID;
UUID tableUUID;
UUID referencedConstraintId = null;
SchemaDescriptor schema;
String tableUUIDString;
String constraintName;
String constraintSType;
String constraintStateStr;
boolean deferrable = ConstraintDefinitionNode.DEFERRABLE_DEFAULT;
boolean initiallyDeferred = ConstraintDefinitionNode.INITIALLY_DEFERRED_DEFAULT;
boolean enforced = ConstraintDefinitionNode.ENFORCED_DEFAULT;
int referenceCount;
String constraintUUIDString;
String schemaUUIDString;
SubConstraintDescriptor scd;
if (SanityManager.DEBUG) {
if (!(parentTupleDescriptor instanceof SubConstraintDescriptor)) {
SanityManager.THROWASSERT("parentTupleDescriptor expected to be instanceof " + "SubConstraintDescriptor, not " + parentTupleDescriptor.getClass().getName());
}
}
scd = (SubConstraintDescriptor) parentTupleDescriptor;
ddg = dd.getDataDescriptorGenerator();
/* 1st column is CONSTRAINTID (UUID - char(36)) */
col = row.getColumn(SYSCONSTRAINTS_CONSTRAINTID);
constraintUUIDString = col.getString();
constraintUUID = getUUIDFactory().recreateUUID(constraintUUIDString);
/* 2nd column is TABLEID (UUID - char(36)) */
col = row.getColumn(SYSCONSTRAINTS_TABLEID);
tableUUIDString = col.getString();
tableUUID = getUUIDFactory().recreateUUID(tableUUIDString);
/* Get the TableDescriptor.
* It may be cached in the SCD,
* otherwise we need to go to the
* DD.
*/
if (scd != null) {
td = scd.getTableDescriptor();
}
if (td == null) {
td = dd.getTableDescriptor(tableUUID);
}
/* 3rd column is NAME (varchar(128)) */
col = row.getColumn(SYSCONSTRAINTS_CONSTRAINTNAME);
constraintName = col.getString();
/* 4th column is TYPE (char(1)) */
col = row.getColumn(SYSCONSTRAINTS_TYPE);
constraintSType = col.getString();
if (SanityManager.DEBUG) {
SanityManager.ASSERT(constraintSType.length() == 1, "Fourth column type incorrect");
}
boolean typeSet = false;
switch(constraintSType.charAt(0)) {
case 'P':
constraintIType = DataDictionary.PRIMARYKEY_CONSTRAINT;
typeSet = true;
// Intentional fall-through: 'P', 'U', and 'F' all reach the shared
// key-constraint handling below; typeSet preserves the earlier type.
case 'U':
if (!typeSet) {
constraintIType = DataDictionary.UNIQUE_CONSTRAINT;
typeSet = true;
}
case 'F':
if (!typeSet)
constraintIType = DataDictionary.FOREIGNKEY_CONSTRAINT;
if (SanityManager.DEBUG) {
if (!(parentTupleDescriptor instanceof SubKeyConstraintDescriptor)) {
SanityManager.THROWASSERT("parentTupleDescriptor expected to be instanceof " + "SubKeyConstraintDescriptor, not " + parentTupleDescriptor.getClass().getName());
}
}
conglomDesc = td.getConglomerateDescriptor(((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId());
/* Take care of the rare case of conglomDesc being null. The
 * reason is that our "td" is out of date. Another thread
 * that was adding a constraint committed between the moment
 * we got the table descriptor (conglomerate list) and the
 * moment we scanned and got the constraint descriptor list.
 * That thread just added new rows to SYSCONGLOMERATES,
 * SYSCONSTRAINTS, etc., and we wouldn't have wanted to lock
 * the system tables just to prevent other threads from adding
 * new rows.
 */
if (conglomDesc == null) {
// we can't be getting td from cache because if we are
// here, we must have been in dd's ddl mode (that's why
// the ddl thread went through), we are not done yet, the
// dd ref count is not 0, hence it couldn't have turned
// into COMPILE_ONLY mode
td = dd.getTableDescriptor(tableUUID);
if (scd != null)
scd.setTableDescriptor(td);
// try again now
conglomDesc = td.getConglomerateDescriptor(((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId());
}
if (SanityManager.DEBUG) {
SanityManager.ASSERT(conglomDesc != null, "conglomDesc is expected to be non-null for backing index");
}
keyColumns = conglomDesc.getIndexDescriptor().baseColumnPositions();
referencedConstraintId = ((SubKeyConstraintDescriptor) parentTupleDescriptor).getKeyConstraintId();
break;
case 'C':
constraintIType = DataDictionary.CHECK_CONSTRAINT;
if (SanityManager.DEBUG) {
if (!(parentTupleDescriptor instanceof SubCheckConstraintDescriptor)) {
SanityManager.THROWASSERT("parentTupleDescriptor expected to be instanceof " + "SubCheckConstraintDescriptor, not " + parentTupleDescriptor.getClass().getName());
}
}
break;
default:
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("Fourth column value invalid");
}
}
/* 5th column is SCHEMAID (UUID - char(36)) */
col = row.getColumn(SYSCONSTRAINTS_SCHEMAID);
schemaUUIDString = col.getString();
schemaUUID = getUUIDFactory().recreateUUID(schemaUUIDString);
schema = dd.getSchemaDescriptor(schemaUUID, null);
/* 6th column is STATE (char(1)) */
col = row.getColumn(SYSCONSTRAINTS_STATE);
constraintStateStr = col.getString();
if (SanityManager.DEBUG) {
SanityManager.ASSERT(constraintStateStr.length() == 1, "Sixth column (state) type incorrect");
}
// Decode the STATE character into the deferrable, initiallyDeferred,
// and enforced flags.
switch(constraintStateStr.charAt(0)) {
case 'E':
deferrable = false;
initiallyDeferred = false;
enforced = true;
break;
case 'D':
deferrable = false;
initiallyDeferred = false;
enforced = false;
break;
case 'e':
deferrable = true;
initiallyDeferred = true;
enforced = true;
break;
case 'd':
deferrable = true;
initiallyDeferred = true;
enforced = false;
break;
case 'i':
deferrable = true;
initiallyDeferred = false;
enforced = true;
break;
case 'j':
deferrable = true;
initiallyDeferred = false;
enforced = false;
break;
default:
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("Invalidate state value '" + constraintStateStr + "' for constraint");
}
}
/* 7th column is REFERENCECOUNT (int) */
col = row.getColumn(SYSCONSTRAINTS_REFERENCECOUNT);
referenceCount = col.getInt();
switch(constraintIType) {
case DataDictionary.PRIMARYKEY_CONSTRAINT:
constraintDesc = ddg.newPrimaryKeyConstraintDescriptor(td, constraintName,
        deferrable, initiallyDeferred,
        // genReferencedColumns(dd, td), // int referencedColumns[],
        keyColumns, constraintUUID,
        ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(),
        schema, enforced, referenceCount);
break;
case DataDictionary.UNIQUE_CONSTRAINT:
constraintDesc = ddg.newUniqueConstraintDescriptor(td, constraintName,
        deferrable, initiallyDeferred,
        // genReferencedColumns(dd, td), // int referencedColumns[],
        keyColumns, constraintUUID,
        ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(),
        schema, enforced, referenceCount);
break;
case DataDictionary.FOREIGNKEY_CONSTRAINT:
if (SanityManager.DEBUG) {
SanityManager.ASSERT(referenceCount == 0, "REFERENCECOUNT column is nonzero for fk constraint");
}
constraintDesc = ddg.newForeignKeyConstraintDescriptor(td, constraintName,
        deferrable, initiallyDeferred,
        // genReferencedColumns(dd, td), // int referencedColumns[],
        keyColumns, constraintUUID,
        ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(),
        schema, referencedConstraintId, enforced,
        ((SubKeyConstraintDescriptor) parentTupleDescriptor).getRaDeleteRule(),
        ((SubKeyConstraintDescriptor) parentTupleDescriptor).getRaUpdateRule());
break;
case DataDictionary.CHECK_CONSTRAINT:
if (SanityManager.DEBUG) {
SanityManager.ASSERT(referenceCount == 0, "REFERENCECOUNT column is nonzero for check constraint");
}
constraintDesc = ddg.newCheckConstraintDescriptor(td, constraintName,
        deferrable, initiallyDeferred, constraintUUID,
        ((SubCheckConstraintDescriptor) parentTupleDescriptor).getConstraintText(),
        ((SubCheckConstraintDescriptor) parentTupleDescriptor).getReferencedColumnsDescriptor(),
        schema, enforced);
break;
}
return constraintDesc;
}
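The STATE column packs the three constraint flags into one character. Below is a hypothetical standalone decoder mirroring the switch above; the class and method names are illustrative and are not part of the Derby API.
/** Hypothetical decoder for the SYSCONSTRAINTS.STATE character. */
final class ConstraintState {
    final boolean deferrable;
    final boolean initiallyDeferred;
    final boolean enforced;

    private ConstraintState(boolean deferrable, boolean initiallyDeferred, boolean enforced) {
        this.deferrable = deferrable;
        this.initiallyDeferred = initiallyDeferred;
        this.enforced = enforced;
    }

    static ConstraintState decode(char state) {
        switch (state) {
            case 'E': return new ConstraintState(false, false, true);
            case 'D': return new ConstraintState(false, false, false);
            case 'e': return new ConstraintState(true,  true,  true);
            case 'd': return new ConstraintState(true,  true,  false);
            case 'i': return new ConstraintState(true,  false, true);
            case 'j': return new ConstraintState(true,  false, false);
            default:  throw new IllegalArgumentException("bad state: " + state);
        }
    }
}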