Use of org.apache.derby.iapi.store.access.GroupFetchScanController in project derby by apache.
The class IndexStatisticsDaemonImpl, method updateIndexStatsMinion.
/**
* Updates the index statistics for the given table and the specified
* indexes.
* <p>
* <strong>API note</strong>: Using {@code null} to update the statistics
* for all conglomerates is preferred over explicitly passing an array with
* all the conglomerates for the table. Doing so allows for some
* optimizations, and will cause a disposable statistics check to be
* performed.
*
* @param lcc language connection context used to perform the work
* @param td the table to update index stats for
* @param cds the conglomerates to update statistics for (non-index
* conglomerates will be ignored), {@code null} means all indexes
* @param asBackgroundTask whether the updates are done automatically as
* part of a background task or if explicitly invoked by the user
* @throws StandardException if something goes wrong
*/
private void updateIndexStatsMinion(LanguageConnectionContext lcc, TableDescriptor td, ConglomerateDescriptor[] cds, boolean asBackgroundTask) throws StandardException {
// can only properly identify disposable stats if cds == null,
// which means we are processing all indexes on the conglomerate.
final boolean identifyDisposableStats = (cds == null);
// Fetch descriptors if we're updating statistics for all indexes.
if (cds == null) {
cds = td.getConglomerateDescriptors();
}
// Extract/derive information from the table descriptor
long[] conglomerateNumber = new long[cds.length];
ExecIndexRow[] indexRow = new ExecIndexRow[cds.length];
TransactionController tc = lcc.getTransactionExecute();
ConglomerateController heapCC = tc.openConglomerate(td.getHeapConglomerateId(), false, 0, TransactionController.MODE_RECORD, asBackgroundTask ? TransactionController.ISOLATION_READ_UNCOMMITTED : TransactionController.ISOLATION_REPEATABLE_READ);
// create a list of indexes that should have statistics, by looking
// at all indexes on the conglomerate, and conditionally skipping
// unique single column indexes. This set is the "non disposable
// stat list".
UUID[] non_disposable_objectUUID = new UUID[cds.length];
try {
for (int i = 0; i < cds.length; i++) {
// Skip non-index conglomerates
if (!cds[i].isIndex()) {
conglomerateNumber[i] = -1;
continue;
}
IndexRowGenerator irg = cds[i].getIndexDescriptor();
// Skip single-column unique indexes unless we're told not to,
// or we are running in soft-upgrade-mode on a pre 10.9 db.
if (skipDisposableStats) {
if (irg.isUnique() && irg.numberOfOrderedColumns() == 1) {
conglomerateNumber[i] = -1;
continue;
}
}
// at this point we have found a stat for an existing
// index which is not a single-column unique index; add it
// to the list of "non disposable stats"
conglomerateNumber[i] = cds[i].getConglomerateNumber();
non_disposable_objectUUID[i] = cds[i].getUUID();
indexRow[i] = irg.getNullIndexRow(td.getColumnDescriptorList(), heapCC.newRowLocationTemplate());
}
} finally {
heapCC.close();
}
if (identifyDisposableStats) {
// Note this loop is not controlled by the skipDisposableStats
// flag. The above loop controls if we drop single column unique
// index stats or not. In all cases we are going to drop
// stats with no associated index (orphaned stats).
List<StatisticsDescriptor> existingStats = td.getStatistics();
StatisticsDescriptor[] stats = (StatisticsDescriptor[]) existingStats.toArray(new StatisticsDescriptor[existingStats.size()]);
// Find those entries that don't have a matching conglomerate in the
// "non disposable stats" list built above; they are dropped below.
for (int si = 0; si < stats.length; si++) {
UUID referencedIndex = stats[si].getReferenceID();
boolean isValid = false;
for (int ci = 0; ci < conglomerateNumber.length; ci++) {
if (referencedIndex.equals(non_disposable_objectUUID[ci])) {
isValid = true;
break;
}
}
// The dropped entry is logged, which acts as a diagnostic
// mechanism in case of another bug like DERBY-5681 in Derby.
if (!isValid) {
String msg = "dropping disposable statistics entry " + stats[si].getUUID() + " for index " + stats[si].getReferenceID() + " (cols=" + stats[si].getColumnCount() + ")";
logAlways(td, null, msg);
trace(1, msg + " on table " + stats[si].getTableUUID());
DataDictionary dd = lcc.getDataDictionary();
if (!lcc.dataDictionaryInWriteMode()) {
dd.startWriting(lcc);
}
dd.dropStatisticsDescriptors(td.getUUID(), stats[si].getReferenceID(), tc);
if (asBackgroundTask) {
lcc.internalCommit(true);
}
}
}
}
// [x][0] = conglomerate number, [x][1] = start time, [x][2] = stop time
long[][] scanTimes = new long[conglomerateNumber.length][3];
int sci = 0;
for (int indexNumber = 0; indexNumber < conglomerateNumber.length; indexNumber++) {
if (conglomerateNumber[indexNumber] == -1)
continue;
// Check if daemon has been disabled.
if (asBackgroundTask) {
if (isShuttingDown()) {
break;
}
}
scanTimes[sci][0] = conglomerateNumber[indexNumber];
scanTimes[sci][1] = System.currentTimeMillis();
// Subtract one for the RowLocation added for indexes.
int numCols = indexRow[indexNumber].nColumns() - 1;
long[] cardinality = new long[numCols];
KeyComparator cmp = new KeyComparator(indexRow[indexNumber]);
/* Read uncommitted, with record locking. Actually CS store may
not hold record locks */
GroupFetchScanController gsc = tc.openGroupFetchScan(conglomerateNumber[indexNumber], // hold
false, 0, // locking
TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED, // scancolumnlist-- want everything.
null, // startkeyvalue-- start from the beginning.
null, 0, // qualifiers, none!
null, // stopkeyvalue,
null, 0);
try {
int rowsFetched = 0;
boolean giving_up_on_shutdown = false;
while ((rowsFetched = cmp.fetchRows(gsc)) > 0) {
// Check for shutdown between group fetches; each fetch is a chunk of
// I/O that is processed at a convenient point.
if (asBackgroundTask) {
if (isShuttingDown()) {
giving_up_on_shutdown = true;
break;
}
}
for (int i = 0; i < rowsFetched; i++) {
int whichPositionChanged = cmp.compareWithPrevKey(i);
if (whichPositionChanged >= 0) {
for (int j = whichPositionChanged; j < numCols; j++) cardinality[j]++;
}
}
}
if (giving_up_on_shutdown)
break;
gsc.setEstimatedRowCount(cmp.getRowCount());
} finally {
gsc.close();
gsc = null;
}
scanTimes[sci++][2] = System.currentTimeMillis();
// We have already scanned the index, so give the statistics
// write a few attempts before giving up.
int retries = 0;
while (true) {
try {
writeUpdatedStats(lcc, td, non_disposable_objectUUID[indexNumber], cmp.getRowCount(), cardinality, asBackgroundTask);
break;
} catch (StandardException se) {
retries++;
if (se.isLockTimeout() && retries < 3) {
trace(2, "lock timeout when writing stats, retrying");
sleep(100 * retries);
} else {
// Give up: either the error is not a lock timeout,
// or we have already retried too many times.
throw se;
}
}
}
}
log(asBackgroundTask, td, fmtScanTimes(scanTimes));
}
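The cardinality loop above counts, for each key-column prefix, how many distinct values a sorted index scan encounters: KeyComparator reports the first column position where the current key differs from the previous one, and every column from that position onward starts a new distinct prefix. The standalone sketch below illustrates just that counting technique; PrefixCardinalitySketch and firstDiff are illustrative names, not Derby APIs.
import java.util.Arrays;

public class PrefixCardinalitySketch {
    public static void main(String[] args) {
        // Keys as they would come back from a sorted two-column index scan.
        String[][] sortedKeys = {
            {"a", "1"}, {"a", "2"}, {"b", "1"}, {"b", "1"}, {"b", "2"}
        };
        int numCols = 2;
        long[] cardinality = new long[numCols];
        String[] prev = null;
        for (String[] key : sortedKeys) {
            // First column position at which this key differs from the
            // previous one; -1 means the keys are identical.
            int changed = (prev == null) ? 0 : firstDiff(prev, key);
            if (changed >= 0) {
                // A change in column j starts a new distinct value for
                // every prefix that includes column j.
                for (int j = changed; j < numCols; j++) {
                    cardinality[j]++;
                }
            }
            prev = key;
        }
        // cardinality[0] = distinct values of (col0) = 2,
        // cardinality[1] = distinct values of (col0, col1) = 4.
        System.out.println(Arrays.toString(cardinality)); // prints [2, 4]
    }

    private static int firstDiff(String[] a, String[] b) {
        for (int i = 0; i < a.length; i++) {
            if (!a[i].equals(b[i])) return i;
        }
        return -1;
    }
}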
Use of org.apache.derby.iapi.store.access.GroupFetchScanController in project derby by apache.
The class AlterTableConstantAction, method defragmentRows.
/**
* Defragment rows in the given table.
* <p>
* Scans the rows at the end of a table and moves them to free spots
* towards the beginning of the table. In the same transaction all
* associated indexes are updated to reflect the new location of the
* base table row.
* <p>
* After a defragment pass, when possible, there will be a set of
* empty pages at the end of the table which can be returned to the
* operating system by calling truncateEnd(). The allocation bit
* maps will be set so that new inserts will tend to go to empty and
* half filled pages starting from the front of the conglomerate.
*
* @param tc transaction controller to use to do updates.
*/
private void defragmentRows(TransactionController tc) throws StandardException {
GroupFetchScanController base_group_fetch_cc = null;
int num_indexes = 0;
int[][] index_col_map = null;
ScanController[] index_scan = null;
ConglomerateController[] index_cc = null;
DataValueDescriptor[][] index_row = null;
TransactionController nested_tc = null;
try {
nested_tc = tc.startNestedUserTransaction(false, true);
switch(td.getTableType()) {
/* Skip views and vti tables */
case TableDescriptor.VIEW_TYPE:
case TableDescriptor.VTI_TYPE:
return;
// DERBY-719,DERBY-720
default:
break;
}
/* Get a row template for the base table */
ExecRow br = lcc.getLanguageConnectionFactory().getExecutionFactory().getValueRow(td.getNumberOfColumns());
/* Fill the row with nulls of the correct type */
for (ColumnDescriptor cd : td.getColumnDescriptorList()) {
br.setColumn(cd.getPosition(), cd.getType().getNull());
}
DataValueDescriptor[][] row_array = new DataValueDescriptor[100][];
row_array[0] = br.getRowArray();
RowLocation[] old_row_location_array = new RowLocation[100];
RowLocation[] new_row_location_array = new RowLocation[100];
// Create the following 3 arrays which will be used to update
// each index as the scan moves rows about the heap as part of
// the compress:
// index_col_map - map location of index cols in the base row,
// ie. index_col_map[0] is column offset of 1st
// key column in base row. All offsets are 0
// based.
// index_scan - open ScanController used to delete old index row
// index_cc - open ConglomerateController used to insert new
// row
ConglomerateDescriptor[] conglom_descriptors = td.getConglomerateDescriptors();
// conglom_descriptors has an entry for the conglomerate and each
// one of its indexes.
num_indexes = conglom_descriptors.length - 1;
// if indexes exist, set up data structures to update them
if (num_indexes > 0) {
// allocate arrays
index_col_map = new int[num_indexes][];
index_scan = new ScanController[num_indexes];
index_cc = new ConglomerateController[num_indexes];
index_row = new DataValueDescriptor[num_indexes][];
setup_indexes(nested_tc, td, index_col_map, index_scan, index_cc, index_row);
}
/* Open the heap for reading */
base_group_fetch_cc = nested_tc.defragmentConglomerate(td.getHeapConglomerateId(), false, true, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
int num_rows_fetched;
while ((num_rows_fetched = base_group_fetch_cc.fetchNextGroup(row_array, old_row_location_array, new_row_location_array)) != 0) {
if (num_indexes > 0) {
for (int row = 0; row < num_rows_fetched; row++) {
for (int index = 0; index < num_indexes; index++) {
fixIndex(row_array[row], index_row[index], old_row_location_array[row], new_row_location_array[row], index_cc[index], index_scan[index], index_col_map[index]);
}
}
}
}
// TODO - It would be better if commits happened more frequently
// in the nested transaction, but to do that there has to be more
// logic to catch a ddl that might jump in the middle of the
// above loop and invalidate the various table control structures
// which are needed to properly update the indexes. For example
// the above loop would corrupt an index added midway through
// the loop if not properly handled. See DERBY-1188.
nested_tc.commit();
} finally {
/* Clean up before we leave */
if (base_group_fetch_cc != null) {
base_group_fetch_cc.close();
base_group_fetch_cc = null;
}
if (num_indexes > 0) {
for (int i = 0; i < num_indexes; i++) {
if (index_scan != null && index_scan[i] != null) {
index_scan[i].close();
index_scan[i] = null;
}
if (index_cc != null && index_cc[i] != null) {
index_cc[i].close();
index_cc[i] = null;
}
}
}
if (nested_tc != null) {
nested_tc.destroy();
}
}
}
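Stripped of index maintenance, the defragment pattern above reduces to the loop in the following sketch. This is a hypothetical condensation, not Derby code: defragmentHeap is an invented helper, the import paths assume the classic org.apache.derby.iapi layout, and a caller with indexes would have to mirror every row move the way fixIndex does above.
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.store.access.GroupFetchScanController;
import org.apache.derby.iapi.store.access.TransactionController;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.types.RowLocation;

// Hypothetical helper: compact a heap that has no indexes to maintain.
static void defragmentHeap(TransactionController tc, long heapConglomId,
        DataValueDescriptor[] templateRow) throws StandardException {
    TransactionController nested_tc = tc.startNestedUserTransaction(false, true);
    GroupFetchScanController cc = null;
    try {
        cc = nested_tc.defragmentConglomerate(heapConglomId, false, true,
                TransactionController.OPENMODE_FORUPDATE,
                TransactionController.MODE_TABLE,
                TransactionController.ISOLATION_SERIALIZABLE);
        DataValueDescriptor[][] rows = new DataValueDescriptor[100][];
        // As in the snippet above, the first slot holds a null row
        // template matching the table's columns.
        rows[0] = templateRow;
        RowLocation[] oldLoc = new RowLocation[100];
        RowLocation[] newLoc = new RowLocation[100];
        // Each group reports rows the store moved, with their old and
        // new locations; with indexes present, each move would have to
        // be applied to every index here.
        while (cc.fetchNextGroup(rows, oldLoc, newLoc) != 0) {
            // nothing to fix up in this index-less sketch
        }
        nested_tc.commit();
    } finally {
        if (cc != null) {
            cc.close();
        }
        nested_tc.destroy();
    }
}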
Use of org.apache.derby.iapi.store.access.GroupFetchScanController in project derby by apache.
The class CreateIndexConstantAction, method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for
* creating an index.
*
* <P>
* An index is represented as:
* <UL>
* <LI> ConglomerateDescriptor.
* </UL>
* No dependencies are created.
*
* @see ConglomerateDescriptor
* @see SchemaDescriptor
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
UUID toid;
ColumnDescriptor columnDescriptor;
int[] baseColumnPositions;
IndexRowGenerator indexRowGenerator = null;
ExecRow[] baseRows;
ExecIndexRow[] indexRows;
ExecRow[] compactBaseRows;
GroupFetchScanController scan;
RowLocationRetRowSource rowSource;
long sortId;
int maxBaseColumnPosition = -1;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
/*
** If the schema descriptor is null, then
** we must have just read ourselves in.
** So we will get the corresponding schema
** descriptor from the data dictionary.
*/
SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc, true);
/* Get the table descriptor. */
/* See if we can get the TableDescriptor
* from the Activation. (Will be there
* for backing indexes.)
*/
td = activation.getDDLTableDescriptor();
if (td == null) {
/* tableId will be non-null if adding an index to
* an existing table (as opposed to creating a
* table with a constraint with a backing index).
*/
if (tableId != null) {
td = dd.getTableDescriptor(tableId);
} else {
td = dd.getTableDescriptor(tableName, sd, tc);
}
}
if (td == null) {
throw StandardException.newException(SQLState.LANG_CREATE_INDEX_NO_TABLE, indexName, tableName);
}
if (td.getTableType() == TableDescriptor.SYSTEM_TABLE_TYPE) {
throw StandardException.newException(SQLState.LANG_CREATE_SYSTEM_INDEX_ATTEMPTED, indexName, tableName);
}
/* Get a shared table lock on the table. We need to lock table before
* invalidate dependents, otherwise, we may interfere with the
* compilation/re-compilation of DML/DDL. See beetle 4325 and $WS/
* docs/language/SolutionsToConcurrencyIssues.txt (point f).
*/
lockTableForDDL(tc, td.getHeapConglomerateId(), false);
// Invalidate all statements that
// depended on this table (including this one)
if (!forCreateTable) {
dm.invalidateFor(td, DependencyManager.CREATE_INDEX, lcc);
}
// Translate the base column names to column positions
baseColumnPositions = new int[columnNames.length];
for (int i = 0; i < columnNames.length; i++) {
// Look up the column in the data dictionary
columnDescriptor = td.getColumnDescriptor(columnNames[i]);
if (columnDescriptor == null) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, columnNames[i], tableName);
}
TypeId typeId = columnDescriptor.getType().getTypeId();
// Don't allow a column to be created on a non-orderable type
ClassFactory cf = lcc.getLanguageConnectionFactory().getClassFactory();
boolean isIndexable = typeId.orderable(cf);
if (isIndexable && typeId.userType()) {
String userClass = typeId.getCorrespondingJavaTypeName();
// Don't allow indexes to be created on classes that are loaded
// from the database, because recovery won't be able to see the
// class when it needs it to
// run the compare method.
try {
if (cf.isApplicationClass(cf.loadApplicationClass(userClass)))
isIndexable = false;
} catch (ClassNotFoundException cnfe) {
// shouldn't happen, as we just checked that the class is orderable
isIndexable = false;
}
}
if (!isIndexable) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_ORDERABLE_DURING_EXECUTION, typeId.getSQLTypeName());
}
// Remember the position in the base table of each column
baseColumnPositions[i] = columnDescriptor.getPosition();
if (maxBaseColumnPosition < baseColumnPositions[i])
maxBaseColumnPosition = baseColumnPositions[i];
}
/* The code below tries to determine if the index that we're about
* to create can "share" a conglomerate with an existing index.
* If so, we will use a single physical conglomerate--namely, the
* one that already exists--to support both indexes. I.e. we will
* *not* create a new conglomerate as part of this constant action.
*
* Deferrable constraints are backed by indexes that are *not* shared
* since they use physically non-unique indexes and as such are
* different from indexes used to represent non-deferrable
* constraints.
*/
// check if we have similar indices already for this table
ConglomerateDescriptor[] congDescs = td.getConglomerateDescriptors();
boolean shareExisting = false;
for (int i = 0; i < congDescs.length; i++) {
ConglomerateDescriptor cd = congDescs[i];
if (!cd.isIndex())
continue;
if (droppedConglomNum == cd.getConglomerateNumber()) {
/* We can't share with any conglomerate descriptor
* whose conglomerate number matches the dropped
* conglomerate number, because that descriptor's
* backing conglomerate was dropped, as well. If
* we're going to share, we have to share with a
* descriptor whose backing physical conglomerate
* is still around.
*/
continue;
}
IndexRowGenerator irg = cd.getIndexDescriptor();
int[] bcps = irg.baseColumnPositions();
boolean[] ia = irg.isAscending();
int j = 0;
/* The conditions which allow an index to share an existing
* conglomerate are as follows:
*
* 1. the set of columns (both key and include columns) and their
* order in the index is the same as that of an existing index AND
*
* 2. the ordering attributes are the same AND
*
* 3. one of the following is true:
* a) the existing index is unique, OR
* b) the existing index is non-unique with uniqueWhenNotNulls
* set to TRUE and the index being created is non-unique, OR
* c) both the existing index and the one being created are
* non-unique and have uniqueWithDuplicateNulls set to FALSE.
*
* 4. hasDeferrableChecking is FALSE.
*/
boolean possibleShare = (irg.isUnique() || !unique) && (bcps.length == baseColumnPositions.length) && !hasDeferrableChecking;
// A non-unique existing index can only be shared when its
// uniqueWithDuplicateNulls flag is compatible; that flag
// is set to true (backing index for unique constraint)
if (possibleShare && !irg.isUnique()) {
/* If the existing index has uniqueWithDuplicateNulls set to
* TRUE it can be shared by other non-unique indexes; otherwise
* the existing non-unique index has uniqueWithDuplicateNulls
* set to FALSE, which means the new non-unique conglomerate
* can only share if it has uniqueWithDuplicateNulls set to
* FALSE, as well.
*/
possibleShare = (irg.isUniqueWithDuplicateNulls() || !uniqueWithDuplicateNulls);
}
if (possibleShare && indexType.equals(irg.indexType())) {
for (; j < bcps.length; j++) {
if ((bcps[j] != baseColumnPositions[j]) || (ia[j] != isAscending[j]))
break;
}
}
// share
if (j == baseColumnPositions.length) {
/*
* Don't allow users to create a duplicate index. Allow if being done internally
* for a constraint
*/
if (!isConstraint) {
activation.addWarning(StandardException.newWarning(SQLState.LANG_INDEX_DUPLICATE, indexName, cd.getConglomerateName()));
return;
}
/* Sharing indexes share the physical conglomerate
* underneath, so pull the conglomerate number from
* the existing conglomerate descriptor.
*/
conglomId = cd.getConglomerateNumber();
/* We create a new IndexRowGenerator because certain
* attributes--esp. uniqueness--may be different between
* the index we're creating and the conglomerate that
* already exists. I.e. even though we're sharing a
* conglomerate, the new index is not necessarily
* identical to the existing conglomerate. We have to
* keep track of that info so that if we later drop
* the shared physical conglomerate, we can figure out
* what this index (the one we're creating now) is
* really supposed to look like.
*/
indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls, // uniqueDeferrable
false, // deferrable indexes are not shared
false, baseColumnPositions, isAscending, baseColumnPositions.length);
// DERBY-655 and DERBY-1343
// Sharing indexes will have unique logical conglomerate UUIDs.
conglomerateUUID = dd.getUUIDFactory().createUUID();
shareExisting = true;
break;
}
}
/* If we have a droppedConglomNum then the index we're about to
* "create" already exists--i.e. it has an index descriptor and
* the corresponding information is already in the system catalogs.
* The only thing we're missing, then, is the physical conglomerate
* to back the index (because the old conglomerate was dropped).
*/
boolean alreadyHaveConglomDescriptor = (droppedConglomNum > -1L);
/* If an essentially identical index already exists, we share the
* conglomerate with the old one, and just simply add a descriptor
* entry into SYSCONGLOMERATES--unless we already have a descriptor,
* in which case we don't even need to do that.
*/
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
if (shareExisting && !alreadyHaveConglomDescriptor) {
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
// can't just return yet, need to get member "indexTemplateRow"
// because create constraint may use it
}
// Describe the properties of the index to the store using Properties
// RESOLVE: The following properties assume a BTREE index.
Properties indexProperties;
if (properties != null) {
indexProperties = properties;
} else {
indexProperties = new Properties();
}
// Tell it the conglomerate id of the base table
indexProperties.put("baseConglomerateId", Long.toString(td.getHeapConglomerateId()));
if (uniqueWithDuplicateNulls && !hasDeferrableChecking) {
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
indexProperties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
} else {
// Older data dictionary versions have no "unique with duplicate
// nulls" index; create a unique index instead.
if (uniqueWithDuplicateNulls) {
unique = true;
}
}
}
// All indexes are unique because they contain the RowLocation.
// The number of uniqueness columns must include the RowLocation
// if the user did not specify a unique index.
indexProperties.put("nUniqueColumns", Integer.toString(unique ? baseColumnPositions.length : baseColumnPositions.length + 1));
// By convention, the row location column is the last column
indexProperties.put("rowLocationColumn", Integer.toString(baseColumnPositions.length));
// For now, all columns are key fields, including the RowLocation
indexProperties.put("nKeyFields", Integer.toString(baseColumnPositions.length + 1));
// For now, assume that all index columns are ordered columns
if (!shareExisting) {
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls, uniqueDeferrable, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), baseColumnPositions, isAscending, baseColumnPositions.length);
} else {
indexRowGenerator = new IndexRowGenerator(indexType, unique, false, false, false, baseColumnPositions, isAscending, baseColumnPositions.length);
}
}
/* Now add the rows from the base table to the conglomerate.
* We do this by scanning the base table and inserting the
* rows into a sorter before inserting from the sorter
* into the index. This gives us better performance
* and a more compact index.
*/
rowSource = null;
sortId = 0;
// set to true once the sorter is created
boolean needToDropSort = false;
/* bulkFetchSize will be 16 (for now) unless
* we are creating the table in which case it
* will be 1. Too hard to remove scan when
* creating index on new table, so minimize
* work where we can.
*/
int bulkFetchSize = (forCreateTable) ? 1 : 16;
int numColumns = td.getNumberOfColumns();
int approximateRowSize = 0;
// Create the FormatableBitSet for mapping the partial to full base row
FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
for (int index = 0; index < baseColumnPositions.length; index++) {
bitSet.set(baseColumnPositions[index]);
}
FormatableBitSet zeroBasedBitSet = RowUtil.shift(bitSet, 1);
// Start by opening a full scan on the base table.
scan = tc.openGroupFetchScan(td.getHeapConglomerateId(), // hold
false, // open base table read only
0, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE, // all fields as objects
zeroBasedBitSet, // startKeyValue
(DataValueDescriptor[]) null, // not used when giving null start posn.
0, // qualifier
null, // stopKeyValue
(DataValueDescriptor[]) null, // not used when giving null stop posn.
0);
// Create an array to put base row template
baseRows = new ExecRow[bulkFetchSize];
indexRows = new ExecIndexRow[bulkFetchSize];
compactBaseRows = new ExecRow[bulkFetchSize];
try {
// Create the array of base row template
for (int i = 0; i < bulkFetchSize; i++) {
// create a base row template
baseRows[i] = activation.getExecutionFactory().getValueRow(maxBaseColumnPosition);
// create an index row template
indexRows[i] = indexRowGenerator.getIndexRowTemplate();
// create a compact base row template
compactBaseRows[i] = activation.getExecutionFactory().getValueRow(baseColumnPositions.length);
}
indexTemplateRow = indexRows[0];
// Fill the partial row with nulls of the correct type
ColumnDescriptorList cdl = td.getColumnDescriptorList();
int cdlSize = cdl.size();
for (int index = 0, numSet = 0; index < cdlSize; index++) {
if (!zeroBasedBitSet.get(index)) {
continue;
}
numSet++;
ColumnDescriptor cd = cdl.elementAt(index);
DataTypeDescriptor dts = cd.getType();
for (int i = 0; i < bulkFetchSize; i++) {
// Put the column in both the compact and sparse base rows
baseRows[i].setColumn(index + 1, dts.getNull());
compactBaseRows[i].setColumn(numSet, baseRows[i].getColumn(index + 1));
}
// Calculate the approximate row size for the index row
approximateRowSize += dts.getTypeId().getApproximateLengthInBytes(dts);
}
// Get an array of RowLocation template
RowLocation[] rl = new RowLocation[bulkFetchSize];
for (int i = 0; i < bulkFetchSize; i++) {
rl[i] = scan.newRowLocationTemplate();
// Get an index row based on the base row
indexRowGenerator.getIndexRow(compactBaseRows[i], rl[i], indexRows[i], bitSet);
}
/* now that we have indexTemplateRow, we are done in the
* sharing-index case
*/
if (shareExisting)
return;
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate key.
* We create a basic sort observer for non-unique indexes
* so that we can reuse the wrappers during an external
* sort.
*/
int numColumnOrderings;
SortObserver sortObserver;
Properties sortProperties = null;
if (unique || uniqueWithDuplicateNulls || uniqueDeferrable) {
// if the index is a constraint, use the constraint name in any
// error message
String indexOrConstraintName = indexName;
if (conglomerateUUID != null) {
ConglomerateDescriptor cd = dd.getConglomerateDescriptor(conglomerateUUID);
if ((isConstraint) && (cd != null && cd.getUUID() != null && td != null)) {
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
}
}
if (unique || uniqueDeferrable) {
numColumnOrderings = unique ? baseColumnPositions.length : baseColumnPositions.length + 1;
sortObserver = new UniqueIndexSortObserver(lcc, constraintID, true, uniqueDeferrable, initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
} else {
// unique with duplicate nulls allowed.
numColumnOrderings = baseColumnPositions.length + 1;
// tell transaction controller to use the unique with
// duplicate nulls sorter, when making createSort() call.
sortProperties = new Properties();
sortProperties.put(AccessFactoryGlobals.IMPL_TYPE, AccessFactoryGlobals.SORT_UNIQUEWITHDUPLICATENULLS_EXTERNAL);
// use sort operator which treats nulls unequal
sortObserver = new UniqueWithDuplicateNullsIndexSortObserver(lcc, constraintID, true, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
}
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(true, false, indexTemplateRow, true);
}
ColumnOrdering[] order = new ColumnOrdering[numColumnOrderings];
for (int i = 0; i < numColumnOrderings; i++) {
order[i] = new IndexColumnOrder(i, unique || i < numColumnOrderings - 1 ? isAscending[i] : true);
}
// create the sorter
sortId = tc.createSort(sortProperties, indexTemplateRow.getRowArrayClone(), order, sortObserver, // not in order
false, scan.getEstimatedRowCount(), // est row size, -1 means no idea
approximateRowSize);
needToDropSort = true;
// Populate sorter and get the output of the sorter into a row
// source. The sorter has the indexed columns only and the columns
// are in the correct order.
rowSource = loadSorter(baseRows, indexRows, tc, scan, sortId, rl);
conglomId = tc.createAndLoadConglomerate(indexType, // index row template
indexTemplateRow.getRowArray(), // columns sort order
order, indexRowGenerator.getColumnCollationIds(td.getColumnDescriptorList()), indexProperties, // not temporary
TransactionController.IS_DEFAULT, rowSource, (long[]) null);
} finally {
/* close the table scan */
if (scan != null)
scan.close();
/* close the sorter row source before throwing exception */
if (rowSource != null)
rowSource.closeRowSource();
/*
** drop the sort so that intermediate external sort run can be
** removed from disk
*/
if (needToDropSort)
tc.dropSort(sortId);
}
ConglomerateController indexController = tc.openConglomerate(conglomId, false, 0, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
// Check to make sure that the conglomerate can be used as an index
if (!indexController.isKeyed()) {
indexController.close();
throw StandardException.newException(SQLState.LANG_NON_KEYED_INDEX, indexName, indexType);
}
indexController.close();
// Create a conglomerate descriptor for the new index unless we
// already have one (i.e. the old backing conglomerate was dropped
// but the descriptor survived).
if (!alreadyHaveConglomDescriptor) {
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
/* Since we created a new conglomerate descriptor, load
* its UUID into the corresponding field, to ensure that
* it is properly set in the StatisticsDescriptor created
* below.
*/
conglomerateUUID = cgd.getUUID();
}
CardinalityCounter cCount = (CardinalityCounter) rowSource;
long numRows = cCount.getRowCount();
if (addStatistics(dd, indexRowGenerator, numRows)) {
long[] c = cCount.getCardinality();
for (int i = 0; i < c.length; i++) {
StatisticsDescriptor statDesc = new StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(), conglomerateUUID, td.getUUID(), "I", new StatisticsImpl(numRows, c[i]), i + 1);
dd.addDescriptor(statDesc, null, DataDictionary.SYSSTATISTICS_CATALOG_NUM, true, tc);
}
}
}
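As a concrete reading of the store Properties set above: for a hypothetical non-unique BTREE index on two base columns, the RowLocation is appended as an extra key column to make entries unique. The fragment below follows that arithmetic; the heap conglomerate id 1040 is invented for illustration.
import java.util.Properties;

Properties indexProperties = new Properties();
// Invented heap conglomerate id, for illustration only.
indexProperties.put("baseConglomerateId", Long.toString(1040L));
// Non-unique index: the RowLocation participates in uniqueness,
// so 2 key columns + 1 row location = 3 "unique" columns.
indexProperties.put("nUniqueColumns", Integer.toString(3));
// By convention the row location is the last (zero-based) column.
indexProperties.put("rowLocationColumn", Integer.toString(2));
// All columns, including the RowLocation, are key fields.
indexProperties.put("nKeyFields", Integer.toString(3));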
Use of org.apache.derby.iapi.store.access.GroupFetchScanController in project derby by apache.
The class ConstraintConstantAction, method validateFKConstraint.
/**
* Make sure that the foreign key constraint is valid
* with the existing data in the target table. Open
* the table, if there aren't any rows, ok. If there
* are rows, open a scan on the referenced key with
* table locking at level 2. Pass in the scans to
* the RIBulkChecker. If any rows fail, barf.
*
* @param activation the activation
* @param tc transaction controller
* @param dd data dictionary
* @param fk foreign key constraint
* @param refcd referenced key
* @param indexTemplateRow index template row
*
* @exception StandardException on error
*/
static void validateFKConstraint(Activation activation, TransactionController tc, DataDictionary dd, ForeignKeyConstraintDescriptor fk, ReferencedKeyConstraintDescriptor refcd, ExecRow indexTemplateRow) throws StandardException {
GroupFetchScanController refScan = null;
GroupFetchScanController fkScan = tc.openGroupFetchScan(fk.getIndexConglomerateDescriptor(dd).getConglomerateNumber(), // hold
false, // read only
0, // already locked
TransactionController.MODE_TABLE, TransactionController.ISOLATION_READ_COMMITTED, // retrieve all fields
(FormatableBitSet) null, // startKeyValue
(DataValueDescriptor[]) null, // startSearchOp
ScanController.GE, // qualifier
null, // stopKeyValue
(DataValueDescriptor[]) null, // stopSearchOp
ScanController.GT);
try {
/*
** If we have no rows, then we are ok. This will
** catch the CREATE TABLE T (x int references P) case
** (as well as an ALTER TABLE ADD CONSTRAINT where there
** are no rows in the target table).
*/
if (!fkScan.next()) {
fkScan.close();
return;
}
fkScan.reopenScan(// startKeyValue
(DataValueDescriptor[]) null, // startSearchOp
ScanController.GE, // qualifier
null, // stopKeyValue
(DataValueDescriptor[]) null, // stopSearchOp
ScanController.GT);
/*
** Make sure each row in the new fk has a matching
** referenced key. No need to get any special locking
** on the referenced table because it cannot delete
** any keys we match because it will block on the table
** lock on the fk table (we have an ex tab lock on
** the target table of this ALTER TABLE command).
** Note that we are doing row locking on the referenced
** table. We could speed things up and get table locking
** because we are likely to be hitting a lot of rows
** in the referenced table, but we are going to err
** on the side of concurrency here.
*/
refScan = tc.openGroupFetchScan(refcd.getIndexConglomerateDescriptor(dd).getConglomerateNumber(), // hold
false, // read only
0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_COMMITTED, // retrieve all fields
(FormatableBitSet) null, // startKeyValue
(DataValueDescriptor[]) null, // startSearchOp
ScanController.GE, // qualifier
null, // stopKeyValue
(DataValueDescriptor[]) null, // stopSearchOp
ScanController.GT);
RIBulkChecker riChecker = new RIBulkChecker(activation, refScan, fkScan, indexTemplateRow, // fail on 1st failure
true, (ConglomerateController) null, (ExecRow) null, fk.getTableDescriptor().getSchemaName(), fk.getTableDescriptor().getName(), fk.getUUID(), fk.deferrable(), fk.getIndexConglomerateDescriptor(dd).getConglomerateNumber(), refcd.getIndexConglomerateDescriptor(dd).getConglomerateNumber());
int numFailures = riChecker.doCheck();
if (numFailures > 0) {
StandardException se = StandardException.newException(SQLState.LANG_ADD_FK_CONSTRAINT_VIOLATION, fk.getConstraintName(), fk.getTableDescriptor().getName());
throw se;
}
} finally {
if (fkScan != null) {
fkScan.close();
fkScan = null;
}
if (refScan != null) {
refScan.close();
refScan = null;
}
}
}
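The empty-table shortcut above uses a probe-then-reopen idiom: one next() call detects whether any rows exist at all, and reopenScan() rewinds the scan before the real check begins. A minimal sketch of that idiom follows, using the same argument shapes as the snippet; positionForCheck is a hypothetical helper, not a Derby API.
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.store.access.GroupFetchScanController;
import org.apache.derby.iapi.store.access.ScanController;
import org.apache.derby.iapi.types.DataValueDescriptor;

// Hypothetical helper: returns false (and closes the scan) when the
// foreign-key index is empty, otherwise rewinds the scan so the full
// referential check can start from the first row.
static boolean positionForCheck(GroupFetchScanController fkScan)
        throws StandardException {
    if (!fkScan.next()) {
        fkScan.close();
        return false; // no rows: the constraint trivially holds
    }
    // next() consumed a position, so reposition at the start.
    fkScan.reopenScan(
            (DataValueDescriptor[]) null, ScanController.GE,  // start
            null,                                             // qualifier
            (DataValueDescriptor[]) null, ScanController.GT); // stop
    return true;
}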
Use of org.apache.derby.iapi.store.access.GroupFetchScanController in project derby by apache.
The class T_QualifierTest, method t_scanFetchNextGroup.
/**
* Test scan which does FetchNextGroup with all of the fields.
* <p>
*
* @return Whether the test succeeded or not.
*
* @exception StandardException Standard exception policy.
*/
public static boolean t_scanFetchNextGroup(TransactionController tc, int group_size, long conglomid, DataValueDescriptor[] fetch_template, DataValueDescriptor[] start_key, int start_op, Qualifier[][] qualifier, DataValueDescriptor[] stop_key, int stop_op, int expect_numrows, int input_expect_key, int order) throws StandardException, T_Fail {
HashSet set = null;
boolean ordered = (order == ORDER_FORWARD || order == ORDER_DESC);
if (!ordered) {
set = create_hash_set(input_expect_key, expect_numrows, order);
}
/**
********************************************************************
* Forward scan test case
**********************************************************************
*/
GroupFetchScanController scan = tc.openGroupFetchScan(conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, start_key, start_op, qualifier, stop_key, stop_op);
// create an array of "group_size" rows to use in the fetch group call.
DataValueDescriptor[][] row_array = new DataValueDescriptor[group_size][];
row_array[0] = TemplateRow.newRow(fetch_template);
int expect_key = input_expect_key;
long key = -42;
long numrows = 0;
int group_row_count = 0;
// loop asking for "group_size" rows at a time.
while ((group_row_count = scan.fetchNextGroup(row_array, (RowLocation[]) null)) != 0) {
// loop through the rows returned into the row_array.
for (int i = 0; i < group_row_count; i++) {
// see if we are getting the right keys.
key = ((SQLLongint) (row_array[i][2])).getLong();
if (ordered) {
if (key != expect_key) {
return (fail("(t_scanFetchNextGroup-forward) wrong key, expect (" + expect_key + ")" + "but got (" + key + "). num rows = " + numrows));
} else {
if (order == ORDER_DESC)
expect_key--;
else
expect_key++;
}
} else {
if (!set.remove(key)) {
return (fail("(t_scanFetchNextGroup-forward) wrong key, expected (" + expect_key + ")" + "but got (" + key + ")."));
}
}
numrows++;
}
}
scan.close();
if (numrows != expect_numrows) {
return (fail("(t_scanFetchNextGroup-forward) wrong number of rows. Expected " + expect_numrows + " rows, but got " + numrows + "rows."));
}
return (true);
}
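For reference, the consumption pattern this test exercises reduces to the loop below: each fetchNextGroup call fills up to row_array.length slots and returns how many are valid, and only row_array[0] needs to hold a populated template before the first call. This is a sketch reusing the test's scan, fetch_template, and TemplateRow names; process is a hypothetical consumer.
DataValueDescriptor[][] row_array = new DataValueDescriptor[16][];
row_array[0] = TemplateRow.newRow(fetch_template);
int n;
while ((n = scan.fetchNextGroup(row_array, (RowLocation[]) null)) != 0) {
    for (int i = 0; i < n; i++) {
        // row_array[i] now holds a fetched row; hand it to a consumer.
        process(row_array[i]); // hypothetical consumer
    }
}
scan.close();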