use of org.apache.derby.iapi.store.access.ColumnOrdering in project derby by apache.
the class GroupByNode method generate.
/**
* generate the sort result set operating over the source
* result set. Adds distinct aggregates to the sort if
* necessary.
*
* @exception StandardException Thrown on error
*/
@Override
void generate(ActivationClassBuilder acb, MethodBuilder mb) throws StandardException {
FormatableArrayHolder orderingHolder;
/* Get the next ResultSet#, so we can number this ResultSetNode, its
* ResultColumnList and ResultSet.
*/
assignResultSetNumber();
// Get the final cost estimate from the child.
setCostEstimate(childResult.getFinalCostEstimate());
/*
** Get the column ordering for the sort. Note that
** for a scalar aggregate we may not have any ordering
** columns (if there are no distinct aggregates).
** WARNING: if a distinct aggregate is passed to
** SortResultSet it assumes that the last column
** is the distinct one. If this assumption changes
** then SortResultSet will have to change.
*/
orderingHolder = acb.getColumnOrdering(groupingList);
if (addDistinctAggregate) {
orderingHolder = acb.addColumnToOrdering(orderingHolder, addDistinctAggregateColumnNum);
}
if (SanityManager.DEBUG) {
if (SanityManager.DEBUG_ON("AggregateTrace")) {
StringBuilder s = new StringBuilder();
s.append("Group by column ordering is (");
ColumnOrdering[] ordering = orderingHolder.getArray(ColumnOrdering[].class);
for (int i = 0; i < ordering.length; i++) {
s.append(ordering[i].getColumnId());
s.append(" ");
}
s.append(")");
SanityManager.DEBUG("AggregateTrace", s.toString());
}
}
int orderingItem = acb.addItem(orderingHolder);
/*
** We have aggregates, so save the aggInfo
** struct in the activation and store the number
*/
if (SanityManager.DEBUG) {
SanityManager.ASSERT(aggInfo != null, "aggInfo not set up as expected");
}
int aggInfoItem = acb.addItem(aggInfo);
acb.pushGetResultSetFactoryExpression(mb);
// Generate the child ResultSet
childResult.generate(acb, mb);
mb.push(isInSortedOrder);
mb.push(aggInfoItem);
mb.push(orderingItem);
mb.push(acb.addItem(getResultColumns().buildRowTemplate()));
mb.push(getResultColumns().getTotalColumnSize());
mb.push(getResultSetNumber());
/* Generate a (Distinct)ScalarAggregateResultSet if scalar aggregates */
if ((groupingList == null) || (groupingList.size() == 0)) {
genScalarAggregateResultSet(acb, mb);
} else { /* Generate a (Distinct)GroupedAggregateResultSet if grouped aggregates */
genGroupedAggregateResultSet(acb, mb);
}
}
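The WARNING in the comment above is the contract that matters for ColumnOrdering here: when a distinct aggregate is present, acb.addColumnToOrdering must leave the distinct column as the last entry of the ordering array, because SortResultSet assumes the last column is the distinct one. A minimal sketch of that append step, assuming IndexColumnOrder (the ColumnOrdering implementation constructed later on this page) and a hypothetical helper name:

// Sketch only - not the actual ActivationClassBuilder.addColumnToOrdering code.
// ColumnOrdering is org.apache.derby.iapi.store.access.ColumnOrdering;
// IndexColumnOrder is the implementation class used in the other snippets below.
static ColumnOrdering[] appendDistinctColumn(ColumnOrdering[] grouping, int distinctColNum) {
    ColumnOrdering[] result = new ColumnOrdering[grouping.length + 1];
    System.arraycopy(grouping, 0, result, 0, grouping.length);
    // the single-int constructor is used for the ascending RID column elsewhere on this page,
    // so plain ascending order is assumed here as well
    result[grouping.length] = new IndexColumnOrder(distinctColNum);
    return result;
}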
use of org.apache.derby.iapi.store.access.ColumnOrdering in project derby by apache.
the class InsertResultSet method emptyIndexes.
/**
* Empty the indexes after doing a bulk insert replace
* where the table has 0 rows after the replace.
* RESOLVE: This method is ugly! Prior to 2.0, we simply
* scanned back across the table to build the indexes. We
* changed this in 2.0 to populate the sorters via a call back
* as we populated the table. Doing a 0 row replace into a
* table with indexes is a degenerate case, hence we allow
* ugly and unoptimized code.
*
* @exception StandardException Thrown on failure
*/
private void emptyIndexes(long newHeapConglom, InsertConstantAction constants, TableDescriptor td, DataDictionary dd, ExecRow fullTemplate) throws StandardException {
int numIndexes = constants.irgs.length;
ExecIndexRow[] idxRows = new ExecIndexRow[numIndexes];
ExecRow baseRows;
ColumnOrdering[][] order = new ColumnOrdering[numIndexes][];
int numColumns = td.getNumberOfColumns();
collation = new int[numIndexes][];
// Create the BitSet for mapping the partial row to the full row
FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
// Need to check each index for referenced columns
int numReferencedColumns = 0;
for (int index = 0; index < numIndexes; index++) {
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
for (int bcp = 0; bcp < baseColumnPositions.length; bcp++) {
if (!bitSet.get(baseColumnPositions[bcp])) {
bitSet.set(baseColumnPositions[bcp]);
numReferencedColumns++;
}
}
}
// We can finally create the partial base row
baseRows = activation.getExecutionFactory().getValueRow(numReferencedColumns);
// Fill in each base row with nulls of the correct data type
int colNumber = 0;
for (int index = 0; index < numColumns; index++) {
if (bitSet.get(index + 1)) {
colNumber++;
// NOTE: 1-based column numbers
baseRows.setColumn(colNumber, fullTemplate.getColumn(index + 1).cloneValue(false));
}
}
needToDropSort = new boolean[numIndexes];
sortIds = new long[numIndexes];
/* Do the initial set up before scanning the heap.
* For each index, build a single index row and a sorter.
*/
for (int index = 0; index < numIndexes; index++) {
// create a single index row template for each index
idxRows[index] = constants.irgs[index].getIndexRowTemplate();
// Get an index row based on the base row
// (This call is only necessary here because we need to pass a
// template to the sorter.)
constants.irgs[index].getIndexRow(baseRows, rl, idxRows[index], bitSet);
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate keys.
*/
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
boolean[] isAscending = constants.irgs[index].isAscending();
int numColumnOrderings;
SortObserver sortObserver;
final IndexRowGenerator indDes = cd.getIndexDescriptor();
if (indDes.isUnique() || indDes.isUniqueDeferrable()) {
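// A truly unique index orders on its key columns only; a deferrable "unique" index is
// physically non-unique, so it orders on the key columns plus the RID (one extra ordering).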
numColumnOrderings = indDes.isUnique() ? baseColumnPositions.length : baseColumnPositions.length + 1;
String indexOrConstraintName = cd.getConglomerateName();
boolean deferred = false;
boolean uniqueDeferrable = false;
UUID uniqueDeferrableConstraintId = null;
if (cd.isConstraint()) {
// so, the index is backing up a constraint
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
deferred = lcc.isEffectivelyDeferred(lcc.getCurrentSQLSessionContext(activation), conDesc.getUUID());
uniqueDeferrable = conDesc.deferrable();
uniqueDeferrableConstraintId = conDesc.getUUID();
}
sortObserver = new UniqueIndexSortObserver(lcc, uniqueDeferrableConstraintId,
    false, // don't clone rows
    uniqueDeferrable, deferred, indexOrConstraintName, idxRows[index], true, td.getName());
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(false, false, idxRows[index], true);
}
order[index] = new ColumnOrdering[numColumnOrderings];
for (int ii = 0; ii < isAscending.length; ii++) {
order[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
}
if (numColumnOrderings > isAscending.length) {
order[index][isAscending.length] = new IndexColumnOrder(isAscending.length);
}
// create the sorters
sortIds[index] = tc.createSort((Properties) null, idxRows[index].getRowArrayClone(), order[index], sortObserver,
    false,     // not in order
    rowCount,  // est rows
    -1);       // est row size, -1 means no idea
needToDropSort[index] = true;
}
// Populate sorters and get the output of each sorter into a row
// source. The sorters have the indexed columns only and the columns
// are in the correct order.
rowSources = new RowLocationRetRowSource[numIndexes];
// Fill in the RowSources
SortController[] sorter = new SortController[numIndexes];
for (int index = 0; index < numIndexes; index++) {
sorter[index] = tc.openSort(sortIds[index]);
sorter[index].completedInserts();
rowSources[index] = tc.openSortRowSource(sortIds[index]);
}
long[] newIndexCongloms = new long[numIndexes];
// Populate each index
for (int index = 0; index < numIndexes; index++) {
ConglomerateController indexCC;
Properties properties = new Properties();
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
// Build the properties list for the new conglomerate
indexCC = tc.openCompiledConglomerate(false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE, constants.indexSCOCIs[index], indexDCOCIs[index]);
// Get the properties on the old index
indexCC.getInternalTablePropertySet(properties);
/* Create the properties that language supplies when creating
 * the index. (The store doesn't preserve these.)
 */
int indexRowLength = idxRows[index].nColumns();
properties.put("baseConglomerateId", Long.toString(newHeapConglom));
if (cd.getIndexDescriptor().isUnique()) {
properties.put("nUniqueColumns", Integer.toString(indexRowLength - 1));
} else {
properties.put("nUniqueColumns", Integer.toString(indexRowLength));
}
if (cd.getIndexDescriptor().isUniqueWithDuplicateNulls() && !cd.getIndexDescriptor().hasDeferrableChecking()) {
properties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
}
properties.put("rowLocationColumn", Integer.toString(indexRowLength - 1));
properties.put("nKeyFields", Integer.toString(indexRowLength));
indexCC.close();
collation[index] = constants.irgs[index].getColumnCollationIds(td.getColumnDescriptorList());
// We can finally drain the sorter and rebuild the index
// Populate the index.
newIndexCongloms[index] = tc.createAndLoadConglomerate("BTREE", idxRows[index].getRowArray(), // default column sort order
null, collation[index], properties, TransactionController.IS_DEFAULT, rowSources[index], (long[]) null);
/* Update the DataDictionary
*
* Update sys.sysconglomerates with new conglomerate #, if the
* conglomerate is shared by duplicate indexes, all the descriptors
* for those indexes need to be updated with the new number.
*/
dd.updateConglomerateDescriptor(td.getConglomerateDescriptors(constants.indexCIDS[index]), newIndexCongloms[index], tc);
// Drop the old conglomerate
tc.dropConglomerate(constants.indexCIDS[index]);
}
}
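Both this method and CreateIndexConstantAction further down build the per-index ColumnOrdering array the same way: one entry per key column in its declared direction, plus a trailing ascending entry for the row location whenever the sort also has to order on the RID. A standalone sketch of that pattern, with a helper name and signature of our own choosing:

// Sketch only; mirrors the order[index] loop above rather than any Derby API.
static ColumnOrdering[] buildIndexOrdering(boolean[] isAscending, boolean orderOnRowLocation) {
    int n = isAscending.length + (orderOnRowLocation ? 1 : 0);
    ColumnOrdering[] order = new ColumnOrdering[n];
    for (int i = 0; i < isAscending.length; i++) {
        order[i] = new IndexColumnOrder(i, isAscending[i]); // key column, declared direction
    }
    if (orderOnRowLocation) {
        order[isAscending.length] = new IndexColumnOrder(isAscending.length); // RID, ascending
    }
    return order;
}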
use of org.apache.derby.iapi.store.access.ColumnOrdering in project derby by apache.
the class T_CreateConglomRet method t_017.
/**
* Test BTree.openScan(), BtreeScan.init(), BtreeScan.next(),
* BtreeScan.fetch() with descending indexes.
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Throws T_Fail on any test failure.
*/
protected boolean t_017(TransactionController tc) throws StandardException, T_Fail {
T_SecondaryIndexRow index_row = new T_SecondaryIndexRow();
// base row template - last column is just to make row long so that
// multiple pages are spanned.
DataValueDescriptor[] base_row = TemplateRow.newU8Row(4);
base_row[3] = new SQLChar();
String string_1500char = new String();
for (int i = 0; i < 300; i++) string_1500char += "mikem";
boolean ret_val = true;
long value = -1;
long[] col1 = { 1, 3, 4, 4, 4, 5, 5, 5, 6, 7, 9 };
long[] col2 = { 1, 1, 2, 4, 6, 2, 4, 6, 1, 1, 1 };
long[] col3 = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 };
// set of deleted rows to make scans more interesting
long[] d_col1 = { 0, 2, 3, 4, 4, 5, 5, 5, 6, 7, 8, 10, 11, 12 };
long[] d_col2 = { 1, 1, 2, 3, 5, 0, 3, 5, 0, 0, 1, 42, 42, 1 };
long[] d_col3 = { 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104 };
REPORT("Starting t_017");
// create the base table
long base_conglomid = tc.createConglomerate(
    "heap",                            // create a heap conglomerate
    base_row,                          // base table template row
    null,                              // column sort order - not required for heap
    null,                              // default collation
    null,                              // default properties
    TransactionController.IS_DEFAULT); // not temporary
// Open the base table
ConglomerateController base_cc = tc.openConglomerate(base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// initialize the secondary index row - pointing it at base row
index_row.init(base_row, base_cc.newRowLocationTemplate(), 5);
Properties properties = createProperties(
    null,           // no current properties list
    false,          // don't allow duplicates
    5,              // 4 columns in index row
    5,              // non-unique index
    true,           // maintain parent links
    base_conglomid, // base conglom id
    4);             // row loc in last column
// create the index with all the columns in descending order
ColumnOrdering[] order = new ColumnOrdering[5];
// descending
order[0] = new T_ColumnOrderingImpl(0, false);
// descending
order[1] = new T_ColumnOrderingImpl(1, false);
// descending
order[2] = new T_ColumnOrderingImpl(2, false);
// descending
order[3] = new T_ColumnOrderingImpl(3, false);
// ascending
order[4] = new T_ColumnOrderingImpl(4, true);
long index_conglomid = tc.createConglomerate(
    "BTREE",                           // create a btree secondary
    index_row.getRow(),                // row template
    order,                             // column sort order
    null,                              // default collation
    properties,                        // properties
    TransactionController.IS_DEFAULT); // not temporary
// Open the conglomerate.
ConglomerateController index_cc = tc.openConglomerate(index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Create a row.
T_SecondaryIndexRow template = new T_SecondaryIndexRow();
RowLocation row_loc = base_cc.newRowLocationTemplate();
template.init(base_row, row_loc, 5);
// insert them in reverse order just to make sure btree is sorting them
for (int i = col1.length - 1; i >= 0; i--) {
((SQLLongint) (template.getRow()[0])).setValue(col1[i]);
((SQLLongint) (template.getRow()[1])).setValue(col2[i]);
((SQLLongint) (template.getRow()[2])).setValue(col3[i]);
base_row[3] = new SQLChar(string_1500char);
base_cc.insertAndFetchLocation(base_row, row_loc);
// ")" + template);
if (index_cc.insert(template.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
index_cc.checkConsistency();
((B2IController) index_cc).debugConglomerate();
ret_val = t_desc_scan_test_cases(tc, index_conglomid, template);
// Insert and then delete some rows; deleted space management may or may not clean these up.
for (int i = d_col1.length - 1; i >= 0; i--) {
((SQLLongint) (template.getRow()[0])).setValue(d_col1[i]);
((SQLLongint) (template.getRow()[1])).setValue(d_col2[i]);
((SQLLongint) (template.getRow()[2])).setValue(d_col3[i]);
base_row[3] = new SQLChar(string_1500char);
base_cc.insertAndFetchLocation(base_row, row_loc);
// ")" + template);
if (index_cc.insert(template.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
// now delete the row.
base_cc.delete(row_loc);
ScanController delete_scan = tc.openScan(index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, template.getRow(), ScanController.GE, null, template.getRow(), ScanController.GT);
if (!delete_scan.next()) {
throw T_Fail.testFailMsg("delete could not find key");
} else {
delete_scan.delete();
if (delete_scan.next())
throw T_Fail.testFailMsg("delete found more than one key");
}
delete_scan.close();
}
ret_val = t_desc_scan_test_cases(tc, index_conglomid, template);
// Close the conglomerate.
index_cc.close();
tc.commit();
REPORT("Ending t_017");
return (ret_val);
}
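T_ColumnOrderingImpl itself is not shown on this page; conceptually it is just a holder for a column position and a direction flag. A hedged stand-in is sketched below, where getColumnId() is taken from the AggregateTrace code above and the name of the ascending accessor is an assumption:

// Hypothetical stand-in for T_ColumnOrderingImpl, for illustration only.
// The real ColumnOrdering interface may declare more methods than these two.
class SimpleColumnOrder implements ColumnOrdering {
    private final int columnId;
    private final boolean ascending;
    SimpleColumnOrder(int columnId, boolean ascending) {
        this.columnId = columnId;
        this.ascending = ascending;
    }
    public int getColumnId() { return columnId; }          // called in GroupByNode's debug trace above
    public boolean getIsAscending() { return ascending; }  // accessor name assumed
}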
use of org.apache.derby.iapi.store.access.ColumnOrdering in project derby by apache.
the class T_CreateConglomRet method t_019.
/**
* Test BTree.openScan(), BtreeScan.init(), BtreeScan.next(),
* BtreeScan.fetch() with alternating ascending and descending column
* sort order indexes.
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Throws T_Fail on any test failure.
*/
protected boolean t_019(TransactionController tc) throws StandardException, T_Fail {
T_SecondaryIndexRow index_row = new T_SecondaryIndexRow();
// base row template - last column is just to make row long so that
// multiple pages are spanned.
DataValueDescriptor[] base_row = TemplateRow.newU8Row(4);
base_row[3] = new SQLChar();
String string_1500char = new String();
for (int i = 0; i < 300; i++) string_1500char += "mikem";
boolean ret_val = true;
long value = -1;
long[] col1 = { 1, 3, 4, 4, 4, 5, 5, 5, 6, 7, 9 };
long[] col2 = { 1, 1, 2, 4, 6, 2, 4, 6, 1, 1, 1 };
long[] col3 = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 };
// set of deleted rows to make scans more interesting
long[] d_col1 = { 0, 2, 3, 4, 4, 5, 5, 5, 6, 7, 8, 10, 11, 12 };
long[] d_col2 = { 1, 1, 2, 3, 5, 0, 3, 5, 0, 0, 1, 42, 42, 1 };
long[] d_col3 = { 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104 };
REPORT("Starting t_019");
// create the base table
long base_conglomid = tc.createConglomerate(
    "heap",                            // create a heap conglomerate
    base_row,                          // base table template row
    null,                              // column sort order - not required for heap
    null,                              // default collation
    null,                              // default properties
    TransactionController.IS_DEFAULT); // not temporary
// Open the base table
ConglomerateController base_cc = tc.openConglomerate(base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// initialize the secondary index row - pointing it at base row
index_row.init(base_row, base_cc.newRowLocationTemplate(), 5);
Properties properties = createProperties(
    null,           // no current properties list
    false,          // don't allow duplicates
    5,              // 4 columns in index row
    5,              // non-unique index
    true,           // maintain parent links
    base_conglomid, // base conglom id
    4);             // row loc in last column
// create the index with a mix of ascending and descending columns
ColumnOrdering[] order = new ColumnOrdering[5];
// Descending
order[0] = new T_ColumnOrderingImpl(0, false);
// Ascending
order[1] = new T_ColumnOrderingImpl(1, true);
// Ascending
order[2] = new T_ColumnOrderingImpl(2, true);
// descending
order[3] = new T_ColumnOrderingImpl(3, false);
// ascending
order[4] = new T_ColumnOrderingImpl(4, true);
long index_conglomid = tc.createConglomerate(
    "BTREE",                           // create a btree secondary
    index_row.getRow(),                // row template
    order,                             // column sort order
    null,                              // default collation
    properties,                        // properties
    TransactionController.IS_DEFAULT); // not temporary
// Open the conglomerate.
ConglomerateController index_cc = tc.openConglomerate(index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Create a row.
T_SecondaryIndexRow template = new T_SecondaryIndexRow();
RowLocation row_loc = base_cc.newRowLocationTemplate();
template.init(base_row, row_loc, 5);
// insert them in reverse order just to make sure btree is sorting them
for (int i = col1.length - 1; i >= 0; i--) {
((SQLLongint) (template.getRow()[0])).setValue(col1[i]);
((SQLLongint) (template.getRow()[1])).setValue(col2[i]);
((SQLLongint) (template.getRow()[2])).setValue(col3[i]);
base_row[3] = new SQLChar(string_1500char);
base_cc.insertAndFetchLocation(base_row, row_loc);
// ")" + template);
if (index_cc.insert(template.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
index_cc.checkConsistency();
((B2IController) index_cc).debugConglomerate();
ret_val = t_ascdesc1_scan_test_cases(tc, index_conglomid, template);
// Insert and then delete some rows; deleted space management may or may not clean these up.
for (int i = d_col1.length - 1; i >= 0; i--) {
((SQLLongint) (template.getRow()[0])).setValue(d_col1[i]);
((SQLLongint) (template.getRow()[1])).setValue(d_col2[i]);
((SQLLongint) (template.getRow()[2])).setValue(d_col3[i]);
base_row[3] = new SQLChar(string_1500char);
base_cc.insertAndFetchLocation(base_row, row_loc);
// ")" + template);
if (index_cc.insert(template.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
// now delete the row.
base_cc.delete(row_loc);
ScanController delete_scan = tc.openScan(index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, template.getRow(), ScanController.GE, null, template.getRow(), ScanController.GT);
if (!delete_scan.next()) {
throw T_Fail.testFailMsg("delete could not find key");
} else {
delete_scan.delete();
if (delete_scan.next())
throw T_Fail.testFailMsg("delete found more than one key");
}
delete_scan.close();
}
ret_val = t_ascdesc1_scan_test_cases(tc, index_conglomid, template);
// Close the conglomerate.
index_cc.close();
tc.commit();
REPORT("Ending t_019");
return (ret_val);
}
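For the mixed ordering in t_019 (col1 descending, col2 and col3 ascending, the long char column descending, RID ascending), the expected forward-scan order of the eleven inserted keys can be worked out by hand from the col1/col2/col3 arrays above; this is an illustration, not output copied from t_ascdesc1_scan_test_cases:

// (col1, col2, col3) keys in index order - col1 descending, then col2/col3 ascending.
// col3 is already unique, so the remaining columns and the RID never break ties here.
// (9,1,21) (7,1,20) (6,1,19)
// (5,2,16) (5,4,17) (5,6,18)
// (4,2,13) (4,4,14) (4,6,15)
// (3,1,12) (1,1,11)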
use of org.apache.derby.iapi.store.access.ColumnOrdering in project derby by apache.
the class CreateIndexConstantAction method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for
* creating an index.
*
* <P>
* An index is represented as:
* <UL>
* <LI> ConglomerateDescriptor.
* </UL>
* No dependencies are created.
*
* @see ConglomerateDescriptor
* @see SchemaDescriptor
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
UUID toid;
ColumnDescriptor columnDescriptor;
int[] baseColumnPositions;
IndexRowGenerator indexRowGenerator = null;
ExecRow[] baseRows;
ExecIndexRow[] indexRows;
ExecRow[] compactBaseRows;
GroupFetchScanController scan;
RowLocationRetRowSource rowSource;
long sortId;
int maxBaseColumnPosition = -1;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
/*
** If the schema descriptor is null, then
** we must have just read ourselves in.
** So we will get the corresponding schema
** descriptor from the data dictionary.
*/
SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc, true);
/* Get the table descriptor. */
/* See if we can get the TableDescriptor
* from the Activation. (Will be there
* for backing indexes.)
*/
td = activation.getDDLTableDescriptor();
if (td == null) {
/* tableId will be non-null if adding an index to
* an existing table (as opposed to creating a
* table with a constraint with a backing index).
*/
if (tableId != null) {
td = dd.getTableDescriptor(tableId);
} else {
td = dd.getTableDescriptor(tableName, sd, tc);
}
}
if (td == null) {
throw StandardException.newException(SQLState.LANG_CREATE_INDEX_NO_TABLE, indexName, tableName);
}
if (td.getTableType() == TableDescriptor.SYSTEM_TABLE_TYPE) {
throw StandardException.newException(SQLState.LANG_CREATE_SYSTEM_INDEX_ATTEMPTED, indexName, tableName);
}
/* Get a shared table lock on the table. We need to lock table before
* invalidate dependents, otherwise, we may interfere with the
* compilation/re-compilation of DML/DDL. See beetle 4325 and $WS/
* docs/language/SolutionsToConcurrencyIssues.txt (point f).
*/
lockTableForDDL(tc, td.getHeapConglomerateId(), false);
// invalidate any prepared statements that depended on this table (including this one)
if (!forCreateTable) {
dm.invalidateFor(td, DependencyManager.CREATE_INDEX, lcc);
}
// Translate the base column names to column positions
baseColumnPositions = new int[columnNames.length];
for (int i = 0; i < columnNames.length; i++) {
// Look up the column in the data dictionary
columnDescriptor = td.getColumnDescriptor(columnNames[i]);
if (columnDescriptor == null) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, columnNames[i], tableName);
}
TypeId typeId = columnDescriptor.getType().getTypeId();
// Don't allow a column to be created on a non-orderable type
ClassFactory cf = lcc.getLanguageConnectionFactory().getClassFactory();
boolean isIndexable = typeId.orderable(cf);
if (isIndexable && typeId.userType()) {
String userClass = typeId.getCorrespondingJavaTypeName();
// Don't allow indexes on user classes loaded from the database: recovery would not be able to see such a class, and it would need it to run the compare method.
try {
if (cf.isApplicationClass(cf.loadApplicationClass(userClass)))
isIndexable = false;
} catch (ClassNotFoundException cnfe) {
// shouldn't happen as we just check the class is orderable
isIndexable = false;
}
}
if (!isIndexable) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_ORDERABLE_DURING_EXECUTION, typeId.getSQLTypeName());
}
// Remember the position in the base table of each column
baseColumnPositions[i] = columnDescriptor.getPosition();
if (maxBaseColumnPosition < baseColumnPositions[i])
maxBaseColumnPosition = baseColumnPositions[i];
}
/* The code below tries to determine if the index that we're about
* to create can "share" a conglomerate with an existing index.
* If so, we will use a single physical conglomerate--namely, the
* one that already exists--to support both indexes. I.e. we will
* *not* create a new conglomerate as part of this constant action.
*
* Deferrable constraints are backed by indexes that are *not* shared
* since they use physically non-unique indexes and as such are
* different from indexes used to represent non-deferrable
* constraints.
*/
// check if we have similar indices already for this table
ConglomerateDescriptor[] congDescs = td.getConglomerateDescriptors();
boolean shareExisting = false;
for (int i = 0; i < congDescs.length; i++) {
ConglomerateDescriptor cd = congDescs[i];
if (!cd.isIndex())
continue;
if (droppedConglomNum == cd.getConglomerateNumber()) {
/* We can't share with any conglomerate descriptor
* whose conglomerate number matches the dropped
* conglomerate number, because that descriptor's
* backing conglomerate was dropped, as well. If
* we're going to share, we have to share with a
* descriptor whose backing physical conglomerate
* is still around.
*/
continue;
}
IndexRowGenerator irg = cd.getIndexDescriptor();
int[] bcps = irg.baseColumnPositions();
boolean[] ia = irg.isAscending();
int j = 0;
/* The conditions which allow an index to share an existing
* conglomerate are as follows:
*
* 1. the set of columns (both key and include columns) and their
* order in the index is the same as that of an existing index AND
*
* 2. the ordering attributes are the same AND
*
* 3. one of the following is true:
* a) the existing index is unique, OR
* b) the existing index is non-unique with uniqueWhenNotNulls
* set to TRUE and the index being created is non-unique, OR
* c) both the existing index and the one being created are
* non-unique and have uniqueWithDuplicateNulls set to FALSE.
*
* 4. hasDeferrableChecking is FALSE.
*/
boolean possibleShare = (irg.isUnique() || !unique) && (bcps.length == baseColumnPositions.length) && !hasDeferrableChecking;
// check whether the existing index is non-unique with uniqueWithDuplicateNulls set to true (backing index for a unique constraint)
if (possibleShare && !irg.isUnique()) {
/* If the existing index has uniqueWithDuplicateNulls set to
* TRUE it can be shared by other non-unique indexes; otherwise
* the existing non-unique index has uniqueWithDuplicateNulls
* set to FALSE, which means the new non-unique conglomerate
* can only share if it has uniqueWithDuplicateNulls set to
* FALSE, as well.
*/
possibleShare = (irg.isUniqueWithDuplicateNulls() || !uniqueWithDuplicateNulls);
}
if (possibleShare && indexType.equals(irg.indexType())) {
for (; j < bcps.length; j++) {
if ((bcps[j] != baseColumnPositions[j]) || (ia[j] != isAscending[j]))
break;
}
}
if (j == baseColumnPositions.length) { // share
/*
* Don't allow users to create a duplicate index. Allow if being done internally
* for a constraint
*/
if (!isConstraint) {
activation.addWarning(StandardException.newWarning(SQLState.LANG_INDEX_DUPLICATE, indexName, cd.getConglomerateName()));
return;
}
/* Sharing indexes share the physical conglomerate
* underneath, so pull the conglomerate number from
* the existing conglomerate descriptor.
*/
conglomId = cd.getConglomerateNumber();
/* We create a new IndexRowGenerator because certain
* attributes--esp. uniqueness--may be different between
* the index we're creating and the conglomerate that
* already exists. I.e. even though we're sharing a
* conglomerate, the new index is not necessarily
* identical to the existing conglomerate. We have to
* keep track of that info so that if we later drop
* the shared physical conglomerate, we can figure out
* what this index (the one we're creating now) is
* really supposed to look like.
*/
indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls,
    false, // uniqueDeferrable
    false, // deferrable indexes are not shared
    baseColumnPositions, isAscending, baseColumnPositions.length);
// DERBY-655 and DERBY-1343
// Sharing indexes will have unique logical conglomerate UUIDs.
conglomerateUUID = dd.getUUIDFactory().createUUID();
shareExisting = true;
break;
}
}
/* If we have a droppedConglomNum then the index we're about to
* "create" already exists--i.e. it has an index descriptor and
* the corresponding information is already in the system catalogs.
* The only thing we're missing, then, is the physical conglomerate
* to back the index (because the old conglomerate was dropped).
*/
boolean alreadyHaveConglomDescriptor = (droppedConglomNum > -1L);
/* If this index already has an essentially same one, we share the
* conglomerate with the old one, and just simply add a descriptor
* entry into SYSCONGLOMERATES--unless we already have a descriptor,
* in which case we don't even need to do that.
*/
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
if (shareExisting && !alreadyHaveConglomDescriptor) {
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
// can't just return yet, need to get member "indexTemplateRow"
// because create constraint may use it
}
// Describe the properties of the index to the store using Properties
// RESOLVE: The following properties assume a BTREE index.
Properties indexProperties;
if (properties != null) {
indexProperties = properties;
} else {
indexProperties = new Properties();
}
// Tell it the conglomerate id of the base table
indexProperties.put("baseConglomerateId", Long.toString(td.getHeapConglomerateId()));
if (uniqueWithDuplicateNulls && !hasDeferrableChecking) {
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
indexProperties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
} else {
// Older data dictionary versions have no unique-with-duplicate-nulls index; create a unique index instead.
if (uniqueWithDuplicateNulls) {
unique = true;
}
}
}
// All indexes are unique because they contain the RowLocation.
// The number of uniqueness columns must include the RowLocation
// if the user did not specify a unique index.
indexProperties.put("nUniqueColumns", Integer.toString(unique ? baseColumnPositions.length : baseColumnPositions.length + 1));
// By convention, the row location column is the last column
indexProperties.put("rowLocationColumn", Integer.toString(baseColumnPositions.length));
// For now, all columns are key fields, including the RowLocation
indexProperties.put("nKeyFields", Integer.toString(baseColumnPositions.length + 1));
// For now, assume that all index columns are ordered columns
if (!shareExisting) {
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls, uniqueDeferrable, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), baseColumnPositions, isAscending, baseColumnPositions.length);
} else {
indexRowGenerator = new IndexRowGenerator(indexType, unique, false, false, false, baseColumnPositions, isAscending, baseColumnPositions.length);
}
}
/* Now add the rows from the base table to the conglomerate.
* We do this by scanning the base table and inserting the
* rows into a sorter before inserting from the sorter
* into the index. This gives us better performance
* and a more compact index.
*/
rowSource = null;
sortId = 0;
// set to true once the sorter is created
boolean needToDropSort = false;
/* bulkFetchSize will be 16 (for now) unless
* we are creating the table in which case it
* will be 1. Too hard to remove scan when
* creating index on new table, so minimize
* work where we can.
*/
int bulkFetchSize = (forCreateTable) ? 1 : 16;
int numColumns = td.getNumberOfColumns();
int approximateRowSize = 0;
// Create the FormatableBitSet for mapping the partial to full base row
FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
for (int index = 0; index < baseColumnPositions.length; index++) {
bitSet.set(baseColumnPositions[index]);
}
FormatableBitSet zeroBasedBitSet = RowUtil.shift(bitSet, 1);
// Start by opening a full scan on the base table.
scan = tc.openGroupFetchScan(td.getHeapConglomerateId(),
    false,                                        // hold
    0,                                            // open base table read only
    TransactionController.MODE_TABLE,
    TransactionController.ISOLATION_SERIALIZABLE,
    zeroBasedBitSet,                              // all fields as objects
    (DataValueDescriptor[]) null,                 // startKeyValue
    0,                                            // not used when giving null start posn.
    null,                                         // qualifier
    (DataValueDescriptor[]) null,                 // stopKeyValue
    0);                                           // not used when giving null stop posn.
// Create an array to put base row template
baseRows = new ExecRow[bulkFetchSize];
indexRows = new ExecIndexRow[bulkFetchSize];
compactBaseRows = new ExecRow[bulkFetchSize];
try {
// Create the array of base row template
for (int i = 0; i < bulkFetchSize; i++) {
// create a base row template
baseRows[i] = activation.getExecutionFactory().getValueRow(maxBaseColumnPosition);
// create an index row template
indexRows[i] = indexRowGenerator.getIndexRowTemplate();
// create a compact base row template
compactBaseRows[i] = activation.getExecutionFactory().getValueRow(baseColumnPositions.length);
}
indexTemplateRow = indexRows[0];
// Fill the partial row with nulls of the correct type
ColumnDescriptorList cdl = td.getColumnDescriptorList();
int cdlSize = cdl.size();
for (int index = 0, numSet = 0; index < cdlSize; index++) {
if (!zeroBasedBitSet.get(index)) {
continue;
}
numSet++;
ColumnDescriptor cd = cdl.elementAt(index);
DataTypeDescriptor dts = cd.getType();
for (int i = 0; i < bulkFetchSize; i++) {
// Put the column in both the compact and sparse base rows
baseRows[i].setColumn(index + 1, dts.getNull());
compactBaseRows[i].setColumn(numSet, baseRows[i].getColumn(index + 1));
}
// Calculate the approximate row size for the index row
approximateRowSize += dts.getTypeId().getApproximateLengthInBytes(dts);
}
// Get an array of RowLocation template
RowLocation[] rl = new RowLocation[bulkFetchSize];
for (int i = 0; i < bulkFetchSize; i++) {
rl[i] = scan.newRowLocationTemplate();
// Get an index row based on the base row
indexRowGenerator.getIndexRow(compactBaseRows[i], rl[i], indexRows[i], bitSet);
}
/* now that we got indexTemplateRow, done for sharing index
*/
if (shareExisting)
return;
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate key.
* We create a basic sort observer for non-unique indexes
* so that we can reuse the wrappers during an external
* sort.
*/
int numColumnOrderings;
SortObserver sortObserver;
Properties sortProperties = null;
if (unique || uniqueWithDuplicateNulls || uniqueDeferrable) {
// if the index is a constraint, use constraintname in
// possible error message
String indexOrConstraintName = indexName;
if (conglomerateUUID != null) {
ConglomerateDescriptor cd = dd.getConglomerateDescriptor(conglomerateUUID);
if ((isConstraint) && (cd != null && cd.getUUID() != null && td != null)) {
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
}
}
if (unique || uniqueDeferrable) {
numColumnOrderings = unique ? baseColumnPositions.length : baseColumnPositions.length + 1;
sortObserver = new UniqueIndexSortObserver(lcc, constraintID, true, uniqueDeferrable, initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
} else {
// unique with duplicate nulls allowed.
numColumnOrderings = baseColumnPositions.length + 1;
// tell transaction controller to use the unique with
// duplicate nulls sorter, when making createSort() call.
sortProperties = new Properties();
sortProperties.put(AccessFactoryGlobals.IMPL_TYPE, AccessFactoryGlobals.SORT_UNIQUEWITHDUPLICATENULLS_EXTERNAL);
// use sort operator which treats nulls unequal
sortObserver = new UniqueWithDuplicateNullsIndexSortObserver(lcc, constraintID, true, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
}
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(true, false, indexTemplateRow, true);
}
ColumnOrdering[] order = new ColumnOrdering[numColumnOrderings];
for (int i = 0; i < numColumnOrderings; i++) {
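// Key columns keep their declared direction; when the index is not physically unique,
// the extra trailing position (the RID) is always sorted ascending.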
order[i] = new IndexColumnOrder(i, unique || i < numColumnOrderings - 1 ? isAscending[i] : true);
}
// create the sorter
sortId = tc.createSort(sortProperties, indexTemplateRow.getRowArrayClone(), order, sortObserver,
    false,                       // not in order
    scan.getEstimatedRowCount(),
    approximateRowSize);         // est row size, -1 means no idea
needToDropSort = true;
// Populate sorter and get the output of the sorter into a row
// source. The sorter has the indexed columns only and the columns
// are in the correct order.
rowSource = loadSorter(baseRows, indexRows, tc, scan, sortId, rl);
conglomId = tc.createAndLoadConglomerate(indexType,
    indexTemplateRow.getRowArray(),   // index row template
    order,                            // columns sort order
    indexRowGenerator.getColumnCollationIds(td.getColumnDescriptorList()),
    indexProperties,
    TransactionController.IS_DEFAULT, // not temporary
    rowSource, (long[]) null);
} finally {
/* close the table scan */
if (scan != null)
scan.close();
/* close the sorter row source before throwing exception */
if (rowSource != null)
rowSource.closeRowSource();
/*
** drop the sort so that intermediate external sort run can be
** removed from disk
*/
if (needToDropSort)
tc.dropSort(sortId);
}
ConglomerateController indexController = tc.openConglomerate(conglomId, false, 0, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
// Check to make sure that the conglomerate can be used as an index
if (!indexController.isKeyed()) {
indexController.close();
throw StandardException.newException(SQLState.LANG_NON_KEYED_INDEX, indexName, indexType);
}
indexController.close();
// Create a conglomerate descriptor with the conglomId filled in and add it.
if (!alreadyHaveConglomDescriptor) {
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
/* Since we created a new conglomerate descriptor, load
* its UUID into the corresponding field, to ensure that
* it is properly set in the StatisticsDescriptor created
* below.
*/
conglomerateUUID = cgd.getUUID();
}
CardinalityCounter cCount = (CardinalityCounter) rowSource;
long numRows = cCount.getRowCount();
if (addStatistics(dd, indexRowGenerator, numRows)) {
long[] c = cCount.getCardinality();
for (int i = 0; i < c.length; i++) {
StatisticsDescriptor statDesc = new StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(), conglomerateUUID, td.getUUID(), "I", new StatisticsImpl(numRows, c[i]), i + 1);
dd.addDescriptor(statDesc, null, DataDictionary.SYSSTATISTICS_CATALOG_NUM, true, tc);
}
}
}
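The BTREE properties set up above use the same keys as emptyIndexes earlier on this page. As a hedged illustration of what they end up holding for a hypothetical unique index on three columns (the conglomerate id and counts are made up):

// Illustration only: 3 key columns, unique index, base heap conglomerate id 1234.
Properties p = new Properties();
p.put("baseConglomerateId", "1234"); // conglomerate id of the base table
p.put("nUniqueColumns", "3");        // unique: key columns only; non-unique would be 4 (keys + RID)
p.put("rowLocationColumn", "3");     // the RID is the last (0-based) column of the index row
p.put("nKeyFields", "4");            // all columns, including the RID, are key fields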