Use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
The class BinaryRelationalOperatorNode, method getAbsoluteColumnPosition.
/**
* Get the absolute 0-based column position of the ColumnReference from
* the conglomerate for this Optimizable.
*
* @param optTable The Optimizable
*
* @return The absolute 0-based column position of the ColumnReference
*/
private int getAbsoluteColumnPosition(Optimizable optTable) {
ColumnReference cr;
ConglomerateDescriptor bestCD;
int columnPosition;
if (keyColumnOnLeft(optTable)) {
cr = (ColumnReference) leftOperand;
} else {
cr = (ColumnReference) rightOperand;
}
bestCD = optTable.getTrulyTheBestAccessPath().getConglomerateDescriptor();
/*
** Column positions are one-based, store is zero-based.
*/
columnPosition = cr.getSource().getColumnPosition();
/*
** If it's an index, find the base column position in the index
** and translate it to an index column position.
*/
if (bestCD != null && bestCD.isIndex()) {
columnPosition = bestCD.getIndexDescriptor().getKeyColumnPosition(columnPosition);
if (SanityManager.DEBUG) {
SanityManager.ASSERT(columnPosition > 0, "Base column not found in index");
}
}
// return the 0-based column position
return columnPosition - 1;
}
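A minimal, self-contained sketch of the position bookkeeping above, since the mixed bases are the subtle part: SQL-level column positions and index key positions are 1-based, while the store is 0-based. The int[] of key positions and the lookup loop below are stand-ins for Derby's IndexRowGenerator.getKeyColumnPosition(), not its actual implementation.
public class PositionTranslationSketch {
    public static void main(String[] args) {
        // Hypothetical index on base columns (C, A) of a table (A, B, C):
        // 1-based base column positions of the index keys, in key order.
        int[] baseColumnPositions = { 3, 1 };
        // Suppose the ColumnReference resolves to base column C (position 3).
        int baseColumnPosition = 3;
        // Equivalent of getKeyColumnPosition(): find the 1-based position of
        // the base column within the index key; 0 means "not found", which
        // mirrors the sanity assert in the Derby method.
        int indexColumnPosition = 0;
        for (int i = 0; i < baseColumnPositions.length; i++) {
            if (baseColumnPositions[i] == baseColumnPosition) {
                indexColumnPosition = i + 1;
                break;
            }
        }
        // The method returns the store's 0-based position.
        System.out.println(indexColumnPosition - 1); // prints 0
    }
}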
Use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
The class InsertResultSet, method updateAllIndexes.
/**
* Update all of the indexes on a table when doing a bulk insert
* on an empty table.
*
* @exception StandardException thrown on error
*/
private void updateAllIndexes(long newHeapConglom, InsertConstantAction constants, TableDescriptor td, DataDictionary dd, ExecRow fullTemplate) throws StandardException {
int numIndexes = constants.irgs.length;
/*
** If we didn't actually read in any rows, then
** we don't need to do anything, unless we were
** doing a replace.
*/
if (indexRows == null) {
if (bulkInsertReplace) {
emptyIndexes(newHeapConglom, constants, td, dd, fullTemplate);
}
return;
}
dd.dropStatisticsDescriptors(td.getUUID(), null, tc);
long[] newIndexCongloms = new long[numIndexes];
indexConversionTable = new Hashtable<Long, Long>(numIndexes);
// Populate each index
for (int index = 0; index < numIndexes; index++) {
ConglomerateController indexCC;
Properties properties = new Properties();
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
// Build the properties list for the new conglomerate
indexCC = tc.openCompiledConglomerate(false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE, constants.indexSCOCIs[index], indexDCOCIs[index]);
// Get the properties on the old index
indexCC.getInternalTablePropertySet(properties);
/* Create the properties that language supplies when creating
* the index. (The store doesn't preserve these.)
*/
int indexRowLength = indexRows[index].nColumns();
properties.put("baseConglomerateId", Long.toString(newHeapConglom));
if (cd.getIndexDescriptor().isUnique()) {
properties.put("nUniqueColumns", Integer.toString(indexRowLength - 1));
} else {
properties.put("nUniqueColumns", Integer.toString(indexRowLength));
}
if (cd.getIndexDescriptor().isUniqueWithDuplicateNulls() && !cd.getIndexDescriptor().hasDeferrableChecking()) {
properties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
}
properties.put("rowLocationColumn", Integer.toString(indexRowLength - 1));
properties.put("nKeyFields", Integer.toString(indexRowLength));
indexCC.close();
// We can finally drain the sorter and rebuild the index
// RESOLVE - all indexes are btrees right now
// Populate the index.
sorters[index].completedInserts();
sorters[index] = null;
rowSources[index] = new CardinalityCounter(tc.openSortRowSource(sortIds[index]));
newIndexCongloms[index] = tc.createAndLoadConglomerate("BTREE", indexRows[index].getRowArray(), ordering[index], collation[index], properties, TransactionController.IS_DEFAULT, rowSources[index], (long[]) null);
CardinalityCounter cCount = (CardinalityCounter) rowSources[index];
long numRows;
if ((numRows = cCount.getRowCount()) > 0) {
long[] c = cCount.getCardinality();
for (int i = 0; i < c.length; i++) {
StatisticsDescriptor statDesc = new StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(), cd.getUUID(), td.getUUID(), "I", new StatisticsImpl(numRows, c[i]), i + 1);
dd.addDescriptor(statDesc, null, DataDictionary.SYSSTATISTICS_CATALOG_NUM, true, tc);
}
}
/* Update the DataDictionary
* RESOLVE - this will change in 1.4 because we will get
* back the same conglomerate number
*
* Update sys.sysconglomerates with new conglomerate #, if the
* conglomerate is shared by duplicate indexes, all the descriptors
* for those indexes need to be updated with the new number.
*/
dd.updateConglomerateDescriptor(td.getConglomerateDescriptors(constants.indexCIDS[index]), newIndexCongloms[index], tc);
// Drop the old conglomerate
tc.dropConglomerate(constants.indexCIDS[index]);
indexConversionTable.put(constants.indexCIDS[index], newIndexCongloms[index]);
}
}
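The property arithmetic in the loop above is compact but easy to misread. The sketch below isolates just that math, under the snippet's own assumption that an index row is the key columns plus one trailing RowLocation column; the helper and class names are illustrative, and only the property keys come from the Derby code.
import java.util.Properties;

public class IndexPropertiesSketch {
    static Properties buildIndexProperties(long heapConglomId,
                                           int indexRowLength,
                                           boolean unique) {
        Properties p = new Properties();
        p.put("baseConglomerateId", Long.toString(heapConglomId));
        // A unique index orders on its keys only; a non-unique index also
        // orders on the trailing RowLocation to disambiguate duplicates.
        int nUnique = unique ? indexRowLength - 1 : indexRowLength;
        p.put("nUniqueColumns", Integer.toString(nUnique));
        // The RowLocation is always the last (0-based) column of the row.
        p.put("rowLocationColumn", Integer.toString(indexRowLength - 1));
        p.put("nKeyFields", Integer.toString(indexRowLength));
        return p;
    }

    public static void main(String[] args) {
        // A two-key unique index: row = (key1, key2, RowLocation), length 3.
        System.out.println(buildIndexProperties(42L, 3, true));
    }
}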
Use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
The class InsertResultSet, method bulkInsertCore.
// Do the work for a bulk insert
private void bulkInsertCore(LanguageConnectionContext lcc, ExecRow fullTemplate, long oldHeapConglom) throws StandardException {
bulkHeapCC = tc.openCompiledConglomerate(false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE, constants.heapSCOCI, heapDCOCI);
long newHeapConglom;
Properties properties = new Properties();
// Get the properties on the old heap
bulkHeapCC.getInternalTablePropertySet(properties);
if (triggerInfo != null) {
// no triggers in bulk insert mode
if (SanityManager.DEBUG) {
SanityManager.NOTREACHED();
}
}
/*
** If we have a before row trigger, then we
** are going to use a row holder to pass rows
** to our trigger.
*/
if (hasBeforeRowTrigger && rowHolder == null) {
rowHolder = new TemporaryRowHolderImpl(activation, properties, resultDescription);
}
// Add any new properties or change the values of any existing properties
Properties targetProperties = constants.getTargetProperties();
Enumeration key = targetProperties.keys();
while (key.hasMoreElements()) {
String keyValue = (String) key.nextElement();
properties.put(keyValue, targetProperties.getProperty(keyValue));
}
// Are there indexes to be updated?
if (constants.irgs.length > 0) {
// Tell source whether or not we need the RIDs back
sourceResultSet.setNeedsRowLocation(true);
}
if (constants.hasDeferrableChecks) {
sourceResultSet.setHasDeferrableChecks();
}
dd = lcc.getDataDictionary();
td = dd.getTableDescriptor(constants.targetUUID);
/* Do the bulk insert - only okay to reuse the
* same conglomerate if bulkInsert.
*/
long[] loadedRowCount = new long[1];
if (bulkInsertReplace) {
newHeapConglom = tc.createAndLoadConglomerate(
        "heap", fullTemplate.getRowArray(),
        null,               // column sort order - not required for heap
        td.getColumnCollationIds(), properties,
        TransactionController.IS_DEFAULT, sourceResultSet, loadedRowCount);
} else {
newHeapConglom = tc.recreateAndLoadConglomerate(
        "heap", false, fullTemplate.getRowArray(),
        null,               // column sort order - not required for heap
        td.getColumnCollationIds(), properties,
        TransactionController.IS_DEFAULT, oldHeapConglom,
        sourceResultSet, loadedRowCount);
}
/* Nothing else to do if we get back the same conglomerate number.
* (In 2.0 this means that 0 rows were inserted.)
*/
if (newHeapConglom == oldHeapConglom) {
return;
}
// Find out how many rows were inserted
rowCount = loadedRowCount[0];
// Set the "estimated" row count
setEstimatedRowCount(newHeapConglom);
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
// Flush any cached identity/autoincrement values for the target table.
if (identitySequenceUUIDString == null) {
lcc.autoincrementFlushCache(constants.targetUUID);
} else {
for (BulkInsertCounter bic : bulkInsertCounters) {
if (bic != null) {
dd.flushBulkInsertCounter(identitySequenceUUIDString, bic);
}
}
}
// invalidate any prepared statements that
// depended on this table (including this one)
DependencyManager dm = dd.getDependencyManager();
dm.invalidateFor(td, DependencyManager.BULK_INSERT, lcc);
// Update all indexes
if (constants.irgs.length > 0) {
// BUT WE HAVE MANY SORTS, ONE PER INDEX: they all get dropped; how do we
// associate each of them with the new indexes that are assigned in here???
// FIXME!!
updateAllIndexes(newHeapConglom, constants, td, dd, fullTemplate);
}
// Drop the old conglomerate
bulkHeapCC.close();
bulkHeapCC = null;
/* Update the DataDictionary
* RESOLVE - this will change in 1.4 because we will get
* back the same conglomerate number
*/
// Get the ConglomerateDescriptor for the heap
ConglomerateDescriptor cd = td.getConglomerateDescriptor(oldHeapConglom);
// Update sys.sysconglomerates with new conglomerate #
dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);
tc.dropConglomerate(oldHeapConglom);
// END RESOLVE
}
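One small modernization note: the raw Enumeration loop that merges targetProperties into the heap's properties predates generics. Below is a behavior-equivalent sketch using the current Properties API; the property name and values are just example data, not taken from the snippet.
import java.util.Properties;

public class PropertiesMergeSketch {
    public static void main(String[] args) {
        Properties properties = new Properties();       // from the old heap
        properties.put("derby.storage.pageSize", "4096");

        Properties targetProperties = new Properties(); // user-supplied overrides
        targetProperties.put("derby.storage.pageSize", "32768");

        // Add new properties or overwrite existing values, as in the
        // Enumeration loop in bulkInsertCore().
        for (String name : targetProperties.stringPropertyNames()) {
            properties.put(name, targetProperties.getProperty(name));
        }
        System.out.println(properties); // pageSize is now 32768
    }
}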
Use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
The class InsertResultSet, method setUpAllSorts.
/**
* Set up to update all of the indexes on a table when doing a bulk insert
* on an empty table.
*
* @exception StandardException thrown on error
*/
private void setUpAllSorts(ExecRow sourceRow, RowLocation rl) throws StandardException {
int numIndexes = constants.irgs.length;
int numColumns = td.getNumberOfColumns();
ordering = new ColumnOrdering[numIndexes][];
collation = new int[numIndexes][];
needToDropSort = new boolean[numIndexes];
sortIds = new long[numIndexes];
rowSources = new RowLocationRetRowSource[numIndexes];
// indexedCols is 1-based
indexedCols = new FormatableBitSet(numColumns + 1);
/* For each index, build a single index row, collation template,
* and a sorter.
*/
for (int index = 0; index < numIndexes; index++) {
// Update the bit map of indexed columns
int[] keyColumns = constants.irgs[index].baseColumnPositions();
for (int i2 = 0; i2 < keyColumns.length; i2++) {
// indexedCols is 1-based
indexedCols.set(keyColumns[i2]);
}
// create a single index row template for each index
indexRows[index] = constants.irgs[index].getIndexRowTemplate();
// Get an index row based on the base row
// (This call is only necessary here because we need to
// pass a template to the sorter.)
constants.irgs[index].getIndexRow(sourceRow, rl, indexRows[index], (FormatableBitSet) null);
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate keys.
*/
// Get the ConglomerateDescriptor for the index
ConglomerateDescriptor cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
boolean[] isAscending = constants.irgs[index].isAscending();
int numColumnOrderings;
SortObserver sortObserver;
/* We can only reuse the wrappers when doing an
* external sort if there is only 1 index. Otherwise,
* we could get in a situation where 1 sort reuses a
* wrapper that is still in use in another sort.
*/
boolean reuseWrappers = (numIndexes == 1);
final IndexRowGenerator indDes = cd.getIndexDescriptor();
Properties sortProperties = null;
String indexOrConstraintName = cd.getConglomerateName();
boolean deferred = false;
boolean deferrable = false;
UUID uniqueDeferrableConstraintId = null;
if (cd.isConstraint()) {
// so, the index is backing up a constraint
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
deferred = lcc.isEffectivelyDeferred(lcc.getCurrentSQLSessionContext(activation), conDesc.getUUID());
deferrable = conDesc.deferrable();
uniqueDeferrableConstraintId = conDesc.getUUID();
}
if (indDes.isUnique() || indDes.isUniqueDeferrable()) {
numColumnOrderings = indDes.isUnique() ? baseColumnPositions.length : baseColumnPositions.length + 1;
sortObserver = new UniqueIndexSortObserver(
        lcc, uniqueDeferrableConstraintId,
        false,                  // don't clone rows
        deferrable, deferred, indexOrConstraintName,
        indexRows[index], reuseWrappers, td.getName());
} else if (indDes.isUniqueWithDuplicateNulls()) {
numColumnOrderings = baseColumnPositions.length + 1;
// tell transaction controller to use the unique with
// duplicate nulls sorter, when making createSort() call.
sortProperties = new Properties();
sortProperties.put(AccessFactoryGlobals.IMPL_TYPE, AccessFactoryGlobals.SORT_UNIQUEWITHDUPLICATENULLS_EXTERNAL);
// use sort operator which treats nulls unequal
sortObserver = new UniqueWithDuplicateNullsIndexSortObserver(lcc, uniqueDeferrableConstraintId, true, deferrable, deferred, indexOrConstraintName, indexRows[index], true, td.getName());
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(false, false, indexRows[index], reuseWrappers);
}
ordering[index] = new ColumnOrdering[numColumnOrderings];
for (int ii = 0; ii < isAscending.length; ii++) {
ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
}
if (numColumnOrderings > isAscending.length) {
ordering[index][isAscending.length] = new IndexColumnOrder(isAscending.length);
}
// set collation templates for later index creation
// call (createAndLoadConglomerate())
collation[index] = constants.irgs[index].getColumnCollationIds(td.getColumnDescriptorList());
// create the sorters
sortIds[index] = tc.createSort(
        sortProperties,
        indexRows[index].getRowArrayClone(),
        ordering[index],
        sortObserver,
        false,                                        // not in order
        (int) sourceResultSet.getEstimatedRowCount(), // est rows
        -1);                                          // est row size, -1 means no idea
needToDropSort[index] = true;
}
sorters = new SortController[numIndexes];
// Open the sorts
for (int index = 0; index < numIndexes; index++) {
sorters[index] = tc.openSort(sortIds[index]);
needToDropSort[index] = true;
}
}
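The ordering-array construction is the piece worth isolating: one column ordering per index key, in key order, plus one extra ordering on the trailing RID column for sorts that must keep duplicate keys distinct. The sketch below assumes the single-argument IndexColumnOrder constructor defaults to ascending; the nested record is a stand-in, not Derby's class.
import java.util.Arrays;

public class SortOrderingSketch {
    // Stand-in for Derby's IndexColumnOrder (a Java 16+ record for brevity).
    record Order(int columnId, boolean ascending) { }

    static Order[] buildOrdering(boolean[] isAscending, int numColumnOrderings) {
        Order[] ordering = new Order[numColumnOrderings];
        for (int ii = 0; ii < isAscending.length; ii++) {
            ordering[ii] = new Order(ii, isAscending[ii]);
        }
        // Non-unique (and unique-with-duplicate-nulls) sorts get one extra
        // ordering column: the RID, assumed ascending here.
        if (numColumnOrderings > isAscending.length) {
            ordering[isAscending.length] = new Order(isAscending.length, true);
        }
        return ordering;
    }

    public static void main(String[] args) {
        // Two-key non-unique index: order by both keys, then the RID.
        System.out.println(Arrays.toString(
                buildOrdering(new boolean[] { true, false }, 3)));
    }
}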
Use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
The class DMLModStatementNode, method generateFKInfo.
/**
* Generate the FKInfo structures used during code generation.
* For each constraint that isn't a check constraint, add another
* one of these FKInfo structures and then package them up into
* a single array.
*
* @param cdl The constraint descriptor list
* @param dd The DataDictionary
* @param td The TableDescriptor
* @param readColsBitSet columns read
*
* @exception StandardException Thrown on failure
*/
private void generateFKInfo(ConstraintDescriptorList cdl, DataDictionary dd, TableDescriptor td, FormatableBitSet readColsBitSet) throws StandardException {
ArrayList<FKInfo> fkList = new ArrayList<FKInfo>();
int type;
UUID[] uuids;
long[] conglomNumbers;
String[] fkNames;
ConstraintDescriptorList fkcdl;
ReferencedKeyConstraintDescriptor refcd;
boolean[] isSelfReferencingFK;
ConstraintDescriptorList activeList = dd.getActiveConstraintDescriptors(cdl);
int[] rowMap = getRowMap(readColsBitSet, td);
int[] raRules;
boolean[] deferrable;
UUID[] fkIds;
ArrayList<String> refSchemaNames = new ArrayList<String>(1);
ArrayList<String> refTableNames = new ArrayList<String>(1);
ArrayList<Long> refIndexConglomNum = new ArrayList<Long>(1);
ArrayList<Integer> refActions = new ArrayList<Integer>(1);
ArrayList<ColumnDescriptorList> refColDescriptors = new ArrayList<ColumnDescriptorList>(1);
ArrayList<int[]> fkColMap = new ArrayList<int[]>(1);
int activeSize = activeList.size();
for (int index = 0; index < activeSize; index++) {
ConstraintDescriptor cd = activeList.elementAt(index);
if (cd instanceof ForeignKeyConstraintDescriptor) {
/*
** We are saving information for checking the
** primary/unique key that is referenced by this
** foreign key, so type is FOREIGN KEY.
*/
type = FKInfo.FOREIGN_KEY;
refcd = ((ForeignKeyConstraintDescriptor) cd).getReferencedConstraint();
uuids = new UUID[1];
deferrable = new boolean[1];
fkIds = new UUID[1];
conglomNumbers = new long[1];
fkNames = new String[1];
isSelfReferencingFK = new boolean[1];
raRules = new int[1];
fkSetupArrays(dd, (ForeignKeyConstraintDescriptor) cd, 0, uuids, conglomNumbers, fkNames, isSelfReferencingFK, raRules, deferrable, fkIds);
// oops, get the right constraint name -- for error
// handling we want the FK name, not refcd name
fkNames[0] = cd.getConstraintName();
} else if (cd instanceof ReferencedKeyConstraintDescriptor) {
refcd = (ReferencedKeyConstraintDescriptor) cd;
/*
** We are saving information for checking the
** foreign key(s) that is dependent on this referenced
** key, so type is REFERENCED KEY.
*/
type = FKInfo.REFERENCED_KEY;
fkcdl = dd.getActiveConstraintDescriptors(((ReferencedKeyConstraintDescriptor) cd).getForeignKeyConstraints(ConstraintDescriptor.ENABLED));
int size = fkcdl.size();
if (size == 0) {
continue;
}
uuids = new UUID[size];
deferrable = new boolean[size];
fkIds = new UUID[size];
fkNames = new String[size];
conglomNumbers = new long[size];
isSelfReferencingFK = new boolean[size];
raRules = new int[size];
TableDescriptor fktd;
ColumnDescriptorList coldl;
int[] refColumns;
ColumnDescriptor cold;
int[] colArray = remapReferencedColumns(cd, rowMap);
for (int inner = 0; inner < size; inner++) {
ForeignKeyConstraintDescriptor fkcd = (ForeignKeyConstraintDescriptor) fkcdl.elementAt(inner);
fkSetupArrays(dd, fkcd, inner, uuids, conglomNumbers, fkNames, isSelfReferencingFK, raRules, deferrable, fkIds);
if ((raRules[inner] == StatementType.RA_CASCADE) || (raRules[inner] == StatementType.RA_SETNULL)) {
// find the referencing table Name
fktd = fkcd.getTableDescriptor();
refSchemaNames.add(fktd.getSchemaName());
refTableNames.add(fktd.getName());
refActions.add(Integer.valueOf(raRules[inner]));
// find the referencing column name required for update null.
refColumns = fkcd.getReferencedColumns();
coldl = fktd.getColumnDescriptorList();
ColumnDescriptorList releventColDes = new ColumnDescriptorList();
for (int i = 0; i < refColumns.length; i++) {
cold = coldl.elementAt(refColumns[i] - 1);
releventColDes.add(cold);
}
refColDescriptors.add(releventColDes);
refIndexConglomNum.add(Long.valueOf(conglomNumbers[inner]));
fkColMap.add(colArray);
}
}
} else {
continue;
}
final TableDescriptor pktd = refcd.getTableDescriptor();
final UUID pkIndexId = refcd.getIndexId();
final ConglomerateDescriptor pkIndexConglom = pktd.getConglomerateDescriptor(pkIndexId);
final TableDescriptor refTd = cd.getTableDescriptor();
fkList.add(new FKInfo(
        fkNames,                                // foreign key names
        cd.getSchemaDescriptor().getSchemaName(),
        refTd.getName(),                        // table being modified
        statementType,                          // INSERT|UPDATE|DELETE
        type,                                   // FOREIGN_KEY|REFERENCED_KEY
        pkIndexId,                              // referenced backing index uuid
        pkIndexConglom.getConglomerateNumber(), // referenced backing index conglom
        refcd.getUUID(),
        refcd.deferrable(),                     // referenced constraint is deferrable
        uuids,                                  // fk backing index uuids
        conglomNumbers,                         // fk backing index congloms
        isSelfReferencingFK,                    // is self ref array of bool
        remapReferencedColumns(cd, rowMap),     // columns referenced by key
        dd.getRowLocationTemplate(getLanguageConnectionContext(), refTd),
        raRules,                                // referential action rules
        deferrable,                             // deferrable flags
        fkIds));                                // UUIDs of fks
}
// Now convert the list into an array.
if (!fkList.isEmpty()) {
fkInfo = fkList.toArray(new FKInfo[fkList.size()]);
}
// Convert the ref action info lists to arrays.
int size = refActions.size();
if (size > 0) {
fkTableNames = new String[size];
fkSchemaNames = new String[size];
fkRefActions = new int[size];
fkColDescriptors = new ColumnDescriptorList[size];
fkIndexConglomNumbers = new long[size];
fkColArrays = new int[size][];
for (int i = 0; i < size; i++) {
fkTableNames[i] = refTableNames.get(i);
fkSchemaNames[i] = refSchemaNames.get(i);
fkRefActions[i] = (refActions.get(i)).intValue();
fkColDescriptors[i] = refColDescriptors.get(i);
fkIndexConglomNumbers[i] = (refIndexConglomNum.get(i)).longValue();
fkColArrays[i] = (fkColMap.get(i));
}
}
}
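The tail of the method fans the growable lists out into the parallel arrays the executor consumes. A minimal sketch of that conversion with two of the six lists; the table names and conglomerate numbers are hypothetical, and auto-unboxing stands in for the explicit longValue() call in the original.
import java.util.ArrayList;
import java.util.List;

public class ListToArraySketch {
    public static void main(String[] args) {
        List<String> refTableNames = new ArrayList<>(List.of("EMP", "DEPT"));
        List<Long> refIndexConglomNum = new ArrayList<>(List.of(961L, 977L));

        int size = refTableNames.size();
        String[] fkTableNames = new String[size];
        long[] fkIndexConglomNumbers = new long[size];
        for (int i = 0; i < size; i++) {
            fkTableNames[i] = refTableNames.get(i);
            fkIndexConglomNumbers[i] = refIndexConglomNum.get(i); // auto-unboxed
        }
        System.out.println(fkTableNames[0] + " -> " + fkIndexConglomNumbers[0]);
    }
}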