
Example 86 with ExecRow

use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.

the class AlterTableConstantAction method truncateTable.

/* 
	 * TRUNCATE TABLE TABLENAME: quickly removes all the rows from the table
	 * and its corresponding indexes.
	 * Truncate is implemented by dropping the existing conglomerates (heap,
	 * indexes) and recreating new ones with the properties of the dropped
	 * conglomerates. The store currently has no support for truncating an
	 * existing conglomerate; until it does, this is the only way to do it.
	 * Error cases: truncate has the same error cases as other DDL statements,
	 * except that:
	 * 1) Truncate is not allowed when the table is referenced by another table.
	 * 2) Truncate is not allowed when there are enabled delete triggers on
	 *    the table.
	 * Note: because the conglomerate number changes during the recreate
	 * process, all statements referencing the table are marked invalid and
	 * are recompiled internally on their next execution. This is fine, since
	 * truncate brings the row count to zero and the existing plans are likely
	 * to be wrong anyway. The recompilation is done internally by Derby; the
	 * user has no control over it.
	 */
private void truncateTable() throws StandardException {
    ExecRow emptyHeapRow;
    long newHeapConglom;
    Properties properties = new Properties();
    RowLocation rl;
    if (SanityManager.DEBUG) {
        if (lockGranularity != '\0') {
            SanityManager.THROWASSERT("lockGranularity expected to be '\0', not " + lockGranularity);
        }
        SanityManager.ASSERT(columnInfo == null, "columnInfo expected to be null");
        SanityManager.ASSERT(constraintActions == null, "constraintActions expected to be null");
    }
    // Truncate is not allowed when the table is referenced by an enabled
    // foreign key in another table; only a self-referencing foreign key,
    // where the ON DELETE action is NO ACTION, is tolerated.
    for (ConstraintDescriptor cd : dd.getConstraintDescriptors(td)) {
        if (cd instanceof ReferencedKeyConstraintDescriptor) {
            final ReferencedKeyConstraintDescriptor rfcd = (ReferencedKeyConstraintDescriptor) cd;
            for (ConstraintDescriptor fkcd : rfcd.getNonSelfReferencingFK(ConstraintDescriptor.ENABLED)) {
                final ForeignKeyConstraintDescriptor fk = (ForeignKeyConstraintDescriptor) fkcd;
                throw StandardException.newException(SQLState.LANG_NO_TRUNCATE_ON_FK_REFERENCE_TABLE, td.getName());
            }
        }
    }
    // truncate is not allowed when there are enabled DELETE triggers
    for (TriggerDescriptor trd : dd.getTriggerDescriptors(td)) {
        if (trd.listensForEvent(TriggerDescriptor.TRIGGER_EVENT_DELETE) && trd.isEnabled()) {
            throw StandardException.newException(SQLState.LANG_NO_TRUNCATE_ON_ENABLED_DELETE_TRIGGERS, td.getName(), trd.getName());
        }
    }
    // gather information from the existing conglomerate to create the new one.
    emptyHeapRow = td.getEmptyExecRow();
    compressHeapCC = tc.openConglomerate(td.getHeapConglomerateId(), false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
    rl = compressHeapCC.newRowLocationTemplate();
    // Get the properties on the old heap
    compressHeapCC.getInternalTablePropertySet(properties);
    compressHeapCC.close();
    compressHeapCC = null;
    // create new conglomerate
    newHeapConglom = tc.createConglomerate(
            "heap",
            emptyHeapRow.getRowArray(),
            null, // column sort order - not required for heap
            td.getColumnCollationIds(),
            properties,
            TransactionController.IS_DEFAULT);
    /* Set up index info to perform truncate on the indexes */
    getAffectedIndexes();
    if (numIndexes > 0) {
        indexRows = new ExecIndexRow[numIndexes];
        ordering = new ColumnOrdering[numIndexes][];
        collation = new int[numIndexes][];
        for (int index = 0; index < numIndexes; index++) {
            IndexRowGenerator curIndex = compressIRGs[index];
            // create a single index row template for each index
            indexRows[index] = curIndex.getIndexRowTemplate();
            curIndex.getIndexRow(emptyHeapRow, rl, indexRows[index], (FormatableBitSet) null);
            /* For non-unique indexes, we order by all columns + the RID.
				 * For unique indexes, we just order by the columns.
				 * No need to try to enforce uniqueness here as
				 * index should be valid.
				 */
            int[] baseColumnPositions = curIndex.baseColumnPositions();
            boolean[] isAscending = curIndex.isAscending();
            int numColumnOrderings;
            numColumnOrderings = baseColumnPositions.length + 1;
            ordering[index] = new ColumnOrdering[numColumnOrderings];
            collation[index] = curIndex.getColumnCollationIds(td.getColumnDescriptorList());
            for (int ii = 0; ii < numColumnOrderings - 1; ii++) {
                ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
            }
            ordering[index][numColumnOrderings - 1] = new IndexColumnOrder(numColumnOrderings - 1);
        }
    }
    /*
		** Inform the data dictionary that we are about to write to it.
		** There are several calls to data dictionary "get" methods here
		** that might be done in "read" mode in the data dictionary, but
		** it seemed safer to do this whole operation in "write" mode.
		**
		** We tell the data dictionary we're done writing at the end of
		** the transaction.
		*/
    dd.startWriting(lcc);
    // truncate all indexes
    if (numIndexes > 0) {
        long[] newIndexCongloms = new long[numIndexes];
        for (int index = 0; index < numIndexes; index++) {
            updateIndex(newHeapConglom, dd, index, newIndexCongloms);
        }
    }
    // Update the DataDictionary
    // Get the ConglomerateDescriptor for the heap
    long oldHeapConglom = td.getHeapConglomerateId();
    ConglomerateDescriptor cd = td.getConglomerateDescriptor(oldHeapConglom);
    // Update sys.sysconglomerates with new conglomerate #
    dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);
    // Now that the updated information is available in the system tables,
    // we should invalidate all statements that use the old conglomerates
    dm.invalidateFor(td, DependencyManager.TRUNCATE_TABLE, lcc);
    // Drop the old conglomerate
    tc.dropConglomerate(oldHeapConglom);
    cleanUp();
}
Also used : Properties(java.util.Properties) ForeignKeyConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ForeignKeyConstraintDescriptor) ConglomerateDescriptor(org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor) TriggerDescriptor(org.apache.derby.iapi.sql.dictionary.TriggerDescriptor) IndexRowGenerator(org.apache.derby.iapi.sql.dictionary.IndexRowGenerator) CheckConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.CheckConstraintDescriptor) ForeignKeyConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ForeignKeyConstraintDescriptor) ConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor) ReferencedKeyConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ReferencedKeyConstraintDescriptor) ExecRow(org.apache.derby.iapi.sql.execute.ExecRow) ReferencedKeyConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ReferencedKeyConstraintDescriptor) RowLocation(org.apache.derby.iapi.types.RowLocation)
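
The two error cases called out in the comment are easy to reproduce from JDBC. The sketch below is illustrative only: the database URL and table names are invented, and it assumes the embedded Derby driver is on the classpath (TRUNCATE TABLE requires Derby 10.7 or later).

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class TruncateDemo {
    public static void main(String[] args) throws SQLException {
        // Hypothetical in-memory database; names are invented for illustration.
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo;create=true");
             Statement s = conn.createStatement()) {
            s.execute("CREATE TABLE parent (id INT PRIMARY KEY)");
            s.execute("CREATE TABLE child (pid INT REFERENCES parent(id))");
            try {
                // Error case 1: parent is referenced by an enabled foreign key.
                s.execute("TRUNCATE TABLE parent");
            } catch (SQLException e) {
                System.out.println("Expected failure: " + e.getMessage());
            }
            // The referencing table itself can be truncated.
            s.execute("TRUNCATE TABLE child");
        }
    }
}

After a successful truncate the table keeps its definition and index descriptors, but the backing conglomerate numbers change, which is what triggers the internal recompilation described in the comment.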

Example 87 with ExecRow

use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.

the class CreateIndexConstantAction method executeConstantAction.

// INTERFACE METHODS
/**
 *	This is the guts of the Execution-time logic for
 *  creating an index.
 *
 *  <P>
 *  An index is represented as:
 *  <UL>
 *  <LI> ConglomerateDescriptor.
 *  </UL>
 *  No dependencies are created.
 *
 *  @see ConglomerateDescriptor
 *  @see SchemaDescriptor
 *	@see ConstantAction#executeConstantAction
 *
 * @exception StandardException		Thrown on failure
 */
public void executeConstantAction(Activation activation) throws StandardException {
    TableDescriptor td;
    UUID toid;
    ColumnDescriptor columnDescriptor;
    int[] baseColumnPositions;
    IndexRowGenerator indexRowGenerator = null;
    ExecRow[] baseRows;
    ExecIndexRow[] indexRows;
    ExecRow[] compactBaseRows;
    GroupFetchScanController scan;
    RowLocationRetRowSource rowSource;
    long sortId;
    int maxBaseColumnPosition = -1;
    LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    DataDictionary dd = lcc.getDataDictionary();
    DependencyManager dm = dd.getDependencyManager();
    TransactionController tc = lcc.getTransactionExecute();
    /*
		** Inform the data dictionary that we are about to write to it.
		** There are several calls to data dictionary "get" methods here
		** that might be done in "read" mode in the data dictionary, but
		** it seemed safer to do this whole operation in "write" mode.
		**
		** We tell the data dictionary we're done writing at the end of
		** the transaction.
		*/
    dd.startWriting(lcc);
    /*
		** If the schema descriptor is null, then
		** we must have just read ourselves in.  
		** So we will get the corresponding schema
		** descriptor from the data dictionary.
		*/
    SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc, true);
    /* Get the table descriptor. */
    /* See if we can get the TableDescriptor 
		 * from the Activation.  (Will be there
		 * for backing indexes.)
		 */
    td = activation.getDDLTableDescriptor();
    if (td == null) {
        /* tableId will be non-null if adding an index to
			 * an existing table (as opposed to creating a
			 * table with a constraint with a backing index).
			 */
        if (tableId != null) {
            td = dd.getTableDescriptor(tableId);
        } else {
            td = dd.getTableDescriptor(tableName, sd, tc);
        }
    }
    if (td == null) {
        throw StandardException.newException(SQLState.LANG_CREATE_INDEX_NO_TABLE, indexName, tableName);
    }
    if (td.getTableType() == TableDescriptor.SYSTEM_TABLE_TYPE) {
        throw StandardException.newException(SQLState.LANG_CREATE_SYSTEM_INDEX_ATTEMPTED, indexName, tableName);
    }
    /* Get a shared table lock on the table. We need to lock table before
		 * invalidate dependents, otherwise, we may interfere with the
		 * compilation/re-compilation of DML/DDL.  See beetle 4325 and $WS/
		 * docs/language/SolutionsToConcurrencyIssues.txt (point f).
		 */
    lockTableForDDL(tc, td.getHeapConglomerateId(), false);
    // invalidate any prepared statements that
    // depended on this table (including this one)
    if (!forCreateTable) {
        dm.invalidateFor(td, DependencyManager.CREATE_INDEX, lcc);
    }
    // Translate the base column names to column positions
    baseColumnPositions = new int[columnNames.length];
    for (int i = 0; i < columnNames.length; i++) {
        // Look up the column in the data dictionary
        columnDescriptor = td.getColumnDescriptor(columnNames[i]);
        if (columnDescriptor == null) {
            throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, columnNames[i], tableName);
        }
        TypeId typeId = columnDescriptor.getType().getTypeId();
        // Don't allow a column to be created on a non-orderable type
        ClassFactory cf = lcc.getLanguageConnectionFactory().getClassFactory();
        boolean isIndexable = typeId.orderable(cf);
        if (isIndexable && typeId.userType()) {
            String userClass = typeId.getCorrespondingJavaTypeName();
            // Don't allow an index on a user-defined type whose implementing
            // class is loaded from the application class path: recovery may
            // not be able to load that class when it needs to
            // run the compare method.
            try {
                if (cf.isApplicationClass(cf.loadApplicationClass(userClass)))
                    isIndexable = false;
            } catch (ClassNotFoundException cnfe) {
                // shouldn't happen, as we just checked that the class is orderable
                isIndexable = false;
            }
        }
        if (!isIndexable) {
            throw StandardException.newException(SQLState.LANG_COLUMN_NOT_ORDERABLE_DURING_EXECUTION, typeId.getSQLTypeName());
        }
        // Remember the position in the base table of each column
        baseColumnPositions[i] = columnDescriptor.getPosition();
        if (maxBaseColumnPosition < baseColumnPositions[i])
            maxBaseColumnPosition = baseColumnPositions[i];
    }
    /* The code below tries to determine if the index that we're about
		 * to create can "share" a conglomerate with an existing index.
		 * If so, we will use a single physical conglomerate--namely, the
		 * one that already exists--to support both indexes. I.e. we will
		 * *not* create a new conglomerate as part of this constant action.
         *
         * Deferrable constraints are backed by indexes that are *not* shared
         * since they use physically non-unique indexes and as such are
         * different from indexes used to represent non-deferrable
         * constraints.
		 */
    // check if we have similar indices already for this table
    ConglomerateDescriptor[] congDescs = td.getConglomerateDescriptors();
    boolean shareExisting = false;
    for (int i = 0; i < congDescs.length; i++) {
        ConglomerateDescriptor cd = congDescs[i];
        if (!cd.isIndex())
            continue;
        if (droppedConglomNum == cd.getConglomerateNumber()) {
            /* We can't share with any conglomerate descriptor
				 * whose conglomerate number matches the dropped
				 * conglomerate number, because that descriptor's
				 * backing conglomerate was dropped, as well.  If
				 * we're going to share, we have to share with a
				 * descriptor whose backing physical conglomerate
				 * is still around.
				 */
            continue;
        }
        IndexRowGenerator irg = cd.getIndexDescriptor();
        int[] bcps = irg.baseColumnPositions();
        boolean[] ia = irg.isAscending();
        int j = 0;
        /* The conditions which allow an index to share an existing
			 * conglomerate are as follows:
			 *
			 * 1. the set of columns (both key and include columns) and their 
			 *  order in the index is the same as that of an existing index AND 
			 *
			 * 2. the ordering attributes are the same AND 
			 *
			 * 3. one of the following is true:
			 *    a) the existing index is unique, OR
			 *    b) the existing index is non-unique with uniqueWhenNotNulls
			 *       set to TRUE and the index being created is non-unique, OR
			 *    c) both the existing index and the one being created are
			 *       non-unique and have uniqueWithDuplicateNulls set to FALSE.
             *
             * 4. hasDeferrableChecking is FALSE.
             */
        boolean possibleShare = (irg.isUnique() || !unique) && (bcps.length == baseColumnPositions.length) && !hasDeferrableChecking;
        // check if the existing index is non-unique and uniqueWithDuplicateNulls
        // is set to true (backing index for unique constraint)
        if (possibleShare && !irg.isUnique()) {
            /* If the existing index has uniqueWithDuplicateNulls set to
				 * TRUE it can be shared by other non-unique indexes; otherwise
				 * the existing non-unique index has uniqueWithDuplicateNulls
				 * set to FALSE, which means the new non-unique conglomerate
				 * can only share if it has uniqueWithDuplicateNulls set to
				 * FALSE, as well.
				 */
            possibleShare = (irg.isUniqueWithDuplicateNulls() || !uniqueWithDuplicateNulls);
        }
        if (possibleShare && indexType.equals(irg.indexType())) {
            for (; j < bcps.length; j++) {
                if ((bcps[j] != baseColumnPositions[j]) || (ia[j] != isAscending[j]))
                    break;
            }
        }
        if (j == baseColumnPositions.length) {
            // share the existing conglomerate
            /*
				 * Don't allow users to create a duplicate index. Allow if being done internally
				 * for a constraint
				 */
            if (!isConstraint) {
                activation.addWarning(StandardException.newWarning(SQLState.LANG_INDEX_DUPLICATE, indexName, cd.getConglomerateName()));
                return;
            }
            /* Sharing indexes share the physical conglomerate
				 * underneath, so pull the conglomerate number from
				 * the existing conglomerate descriptor.
				 */
            conglomId = cd.getConglomerateNumber();
            /* We create a new IndexRowGenerator because certain
				 * attributes--esp. uniqueness--may be different between
				 * the index we're creating and the conglomerate that
				 * already exists.  I.e. even though we're sharing a
				 * conglomerate, the new index is not necessarily
				 * identical to the existing conglomerate. We have to
				 * keep track of that info so that if we later drop
				 * the shared physical conglomerate, we can figure out
				 * what this index (the one we're creating now) is
				 * really supposed to look like.
				 */
            indexRowGenerator = new IndexRowGenerator(
                    indexType, unique, uniqueWithDuplicateNulls,
                    false, // uniqueDeferrable
                    false, // hasDeferrableChecking - deferrable indexes are not shared
                    baseColumnPositions, isAscending, baseColumnPositions.length);
            // DERBY-655 and DERBY-1343
            // Sharing indexes will have unique logical conglomerate UUIDs.
            conglomerateUUID = dd.getUUIDFactory().createUUID();
            shareExisting = true;
            break;
        }
    }
    /* If we have a droppedConglomNum then the index we're about to
		 * "create" already exists--i.e. it has an index descriptor and
		 * the corresponding information is already in the system catalogs.
		 * The only thing we're missing, then, is the physical conglomerate
		 * to back the index (because the old conglomerate was dropped).
		 */
    boolean alreadyHaveConglomDescriptor = (droppedConglomNum > -1L);
    /* If this index already has an essentially same one, we share the
		 * conglomerate with the old one, and just simply add a descriptor
		 * entry into SYSCONGLOMERATES--unless we already have a descriptor,
		 * in which case we don't even need to do that.
		 */
    DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
    if (shareExisting && !alreadyHaveConglomDescriptor) {
        ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
        dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
        // add newly added conglomerate to the list of conglomerate
        // descriptors in the td.
        ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
        cdl.add(cgd);
    // can't just return yet, need to get member "indexTemplateRow"
    // because create constraint may use it
    }
    // Describe the properties of the index to the store using Properties
    // RESOLVE: The following properties assume a BTREE index.
    Properties indexProperties;
    if (properties != null) {
        indexProperties = properties;
    } else {
        indexProperties = new Properties();
    }
    // Tell it the conglomerate id of the base table
    indexProperties.put("baseConglomerateId", Long.toString(td.getHeapConglomerateId()));
    if (uniqueWithDuplicateNulls && !hasDeferrableChecking) {
        if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
            indexProperties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
        } else {
            // An older data dictionary version has no "unique with duplicate
            // nulls" index; create a unique index instead.
            if (uniqueWithDuplicateNulls) {
                unique = true;
            }
        }
    }
    // All indexes are unique because they contain the RowLocation.
    // The number of uniqueness columns must include the RowLocation
    // if the user did not specify a unique index.
    indexProperties.put("nUniqueColumns", Integer.toString(unique ? baseColumnPositions.length : baseColumnPositions.length + 1));
    // By convention, the row location column is the last column
    indexProperties.put("rowLocationColumn", Integer.toString(baseColumnPositions.length));
    // For now, all columns are key fields, including the RowLocation
    indexProperties.put("nKeyFields", Integer.toString(baseColumnPositions.length + 1));
    // For now, assume that all index columns are ordered columns
    if (!shareExisting) {
        if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
            indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls, uniqueDeferrable, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), baseColumnPositions, isAscending, baseColumnPositions.length);
        } else {
            indexRowGenerator = new IndexRowGenerator(indexType, unique, false, false, false, baseColumnPositions, isAscending, baseColumnPositions.length);
        }
    }
    /* Now add the rows from the base table to the conglomerate.
		 * We do this by scanning the base table and inserting the
		 * rows into a sorter before inserting from the sorter
		 * into the index.  This gives us better performance
		 * and a more compact index.
		 */
    rowSource = null;
    sortId = 0;
    // set to true once the sorter is created
    boolean needToDropSort = false;
    /* bulkFetchSize will be 16 (for now) unless
		 * we are creating the table in which case it
		 * will be 1.  Too hard to remove scan when
		 * creating index on new table, so minimize
		 * work where we can.
		 */
    int bulkFetchSize = (forCreateTable) ? 1 : 16;
    int numColumns = td.getNumberOfColumns();
    int approximateRowSize = 0;
    // Create the FormatableBitSet for mapping the partial to full base row
    FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
    for (int index = 0; index < baseColumnPositions.length; index++) {
        bitSet.set(baseColumnPositions[index]);
    }
    FormatableBitSet zeroBasedBitSet = RowUtil.shift(bitSet, 1);
    // Start by opening a full scan on the base table.
    scan = tc.openGroupFetchScan(
            td.getHeapConglomerateId(),
            false, // hold
            0, // open base table read only
            TransactionController.MODE_TABLE,
            TransactionController.ISOLATION_SERIALIZABLE,
            zeroBasedBitSet, // all fields as objects
            (DataValueDescriptor[]) null, // startKeyValue
            0, // not used when giving a null start position
            null, // qualifier
            (DataValueDescriptor[]) null, // stopKeyValue
            0); // not used when giving a null stop position
    // Create arrays for the base row, index row and compact base row templates
    baseRows = new ExecRow[bulkFetchSize];
    indexRows = new ExecIndexRow[bulkFetchSize];
    compactBaseRows = new ExecRow[bulkFetchSize];
    try {
        // Create one set of row templates per bulk-fetch slot
        for (int i = 0; i < bulkFetchSize; i++) {
            // create a base row template
            baseRows[i] = activation.getExecutionFactory().getValueRow(maxBaseColumnPosition);
            // create an index row template
            indexRows[i] = indexRowGenerator.getIndexRowTemplate();
            // create a compact base row template
            compactBaseRows[i] = activation.getExecutionFactory().getValueRow(baseColumnPositions.length);
        }
        indexTemplateRow = indexRows[0];
        // Fill the partial row with nulls of the correct type
        ColumnDescriptorList cdl = td.getColumnDescriptorList();
        int cdlSize = cdl.size();
        for (int index = 0, numSet = 0; index < cdlSize; index++) {
            if (!zeroBasedBitSet.get(index)) {
                continue;
            }
            numSet++;
            ColumnDescriptor cd = cdl.elementAt(index);
            DataTypeDescriptor dts = cd.getType();
            for (int i = 0; i < bulkFetchSize; i++) {
                // Put the column in both the compact and sparse base rows
                baseRows[i].setColumn(index + 1, dts.getNull());
                compactBaseRows[i].setColumn(numSet, baseRows[i].getColumn(index + 1));
            }
            // Calculate the approximate row size for the index row
            approximateRowSize += dts.getTypeId().getApproximateLengthInBytes(dts);
        }
        // Get an array of RowLocation templates
        RowLocation[] rl = new RowLocation[bulkFetchSize];
        for (int i = 0; i < bulkFetchSize; i++) {
            rl[i] = scan.newRowLocationTemplate();
            // Get an index row based on the base row
            indexRowGenerator.getIndexRow(compactBaseRows[i], rl[i], indexRows[i], bitSet);
        }
        /* Now that we have indexTemplateRow, we are done in the
         * index-sharing case.
         */
        if (shareExisting)
            return;
        /* For non-unique indexes, we order by all columns + the RID.
			 * For unique indexes, we just order by the columns.
			 * We create a unique index observer for unique indexes
			 * so that we can catch duplicate key.
			 * We create a basic sort observer for non-unique indexes
			 * so that we can reuse the wrappers during an external
			 * sort.
			 */
        int numColumnOrderings;
        SortObserver sortObserver;
        Properties sortProperties = null;
        if (unique || uniqueWithDuplicateNulls || uniqueDeferrable) {
            // if the index is a constraint, use constraintname in
            // possible error message
            String indexOrConstraintName = indexName;
            if (conglomerateUUID != null) {
                ConglomerateDescriptor cd = dd.getConglomerateDescriptor(conglomerateUUID);
                if ((isConstraint) && (cd != null && cd.getUUID() != null && td != null)) {
                    ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
                    indexOrConstraintName = conDesc.getConstraintName();
                }
            }
            if (unique || uniqueDeferrable) {
                numColumnOrderings = unique ? baseColumnPositions.length : baseColumnPositions.length + 1;
                sortObserver = new UniqueIndexSortObserver(lcc, constraintID, true, uniqueDeferrable, initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
            } else {
                // unique with duplicate nulls allowed.
                numColumnOrderings = baseColumnPositions.length + 1;
                // tell transaction controller to use the unique with
                // duplicate nulls sorter, when making createSort() call.
                sortProperties = new Properties();
                sortProperties.put(AccessFactoryGlobals.IMPL_TYPE, AccessFactoryGlobals.SORT_UNIQUEWITHDUPLICATENULLS_EXTERNAL);
                // use sort operator which treats nulls unequal
                sortObserver = new UniqueWithDuplicateNullsIndexSortObserver(lcc, constraintID, true, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
            }
        } else {
            numColumnOrderings = baseColumnPositions.length + 1;
            sortObserver = new BasicSortObserver(true, false, indexTemplateRow, true);
        }
        ColumnOrdering[] order = new ColumnOrdering[numColumnOrderings];
        for (int i = 0; i < numColumnOrderings; i++) {
            order[i] = new IndexColumnOrder(
                    i, (unique || i < numColumnOrderings - 1) ? isAscending[i] : true);
        }
        // create the sorter
        sortId = tc.createSort(
                sortProperties,
                indexTemplateRow.getRowArrayClone(),
                order,
                sortObserver,
                false, // rows are not already in order
                scan.getEstimatedRowCount(),
                approximateRowSize); // estimated row size, -1 means no idea
        needToDropSort = true;
        // Populate sorter and get the output of the sorter into a row
        // source.  The sorter has the indexed columns only and the columns
        // are in the correct order.
        rowSource = loadSorter(baseRows, indexRows, tc, scan, sortId, rl);
        conglomId = tc.createAndLoadConglomerate(
                indexType,
                indexTemplateRow.getRowArray(), // index row template
                order, // columns sort order
                indexRowGenerator.getColumnCollationIds(td.getColumnDescriptorList()),
                indexProperties,
                TransactionController.IS_DEFAULT, // not temporary
                rowSource,
                (long[]) null);
    } finally {
        /* close the table scan */
        if (scan != null)
            scan.close();
        /* close the sorter row source before throwing exception */
        if (rowSource != null)
            rowSource.closeRowSource();
        /*
			** drop the sort so that intermediate external sort run can be
			** removed from disk
			*/
        if (needToDropSort)
            tc.dropSort(sortId);
    }
    ConglomerateController indexController = tc.openConglomerate(conglomId, false, 0, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
    // Check to make sure that the conglomerate can be used as an index
    if (!indexController.isKeyed()) {
        indexController.close();
        throw StandardException.newException(SQLState.LANG_NON_KEYED_INDEX, indexName, indexType);
    }
    indexController.close();
    // Create a conglomerate descriptor with the conglomId filled in
    // and add it, unless we already have one.
    if (!alreadyHaveConglomDescriptor) {
        ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
        dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
        // add newly added conglomerate to the list of conglomerate
        // descriptors in the td.
        ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
        cdl.add(cgd);
        /* Since we created a new conglomerate descriptor, load
			 * its UUID into the corresponding field, to ensure that
			 * it is properly set in the StatisticsDescriptor created
			 * below.
			 */
        conglomerateUUID = cgd.getUUID();
    }
    CardinalityCounter cCount = (CardinalityCounter) rowSource;
    long numRows = cCount.getRowCount();
    if (addStatistics(dd, indexRowGenerator, numRows)) {
        long[] c = cCount.getCardinality();
        for (int i = 0; i < c.length; i++) {
            StatisticsDescriptor statDesc = new StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(), conglomerateUUID, td.getUUID(), "I", new StatisticsImpl(numRows, c[i]), i + 1);
            dd.addDescriptor(statDesc, null, DataDictionary.SYSSTATISTICS_CATALOG_NUM, true, tc);
        }
    }
}
Also used : ClassFactory(org.apache.derby.iapi.services.loader.ClassFactory) DataTypeDescriptor(org.apache.derby.iapi.types.DataTypeDescriptor) ColumnOrdering(org.apache.derby.iapi.store.access.ColumnOrdering) ConglomerateController(org.apache.derby.iapi.store.access.ConglomerateController) DependencyManager(org.apache.derby.iapi.sql.depend.DependencyManager) Properties(java.util.Properties) RowLocationRetRowSource(org.apache.derby.iapi.store.access.RowLocationRetRowSource) DataDescriptorGenerator(org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator) IndexRowGenerator(org.apache.derby.iapi.sql.dictionary.IndexRowGenerator) ColumnDescriptorList(org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList) ConglomerateDescriptorList(org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptorList) FormatableBitSet(org.apache.derby.iapi.services.io.FormatableBitSet) UUID(org.apache.derby.catalog.UUID) RowLocation(org.apache.derby.iapi.types.RowLocation) TypeId(org.apache.derby.iapi.types.TypeId) StatisticsDescriptor(org.apache.derby.iapi.sql.dictionary.StatisticsDescriptor) SchemaDescriptor(org.apache.derby.iapi.sql.dictionary.SchemaDescriptor) ColumnDescriptor(org.apache.derby.iapi.sql.dictionary.ColumnDescriptor) GroupFetchScanController(org.apache.derby.iapi.store.access.GroupFetchScanController) DataDictionary(org.apache.derby.iapi.sql.dictionary.DataDictionary) ExecIndexRow(org.apache.derby.iapi.sql.execute.ExecIndexRow) ConglomerateDescriptor(org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor) TableDescriptor(org.apache.derby.iapi.sql.dictionary.TableDescriptor) SortObserver(org.apache.derby.iapi.store.access.SortObserver) StatisticsImpl(org.apache.derby.catalog.types.StatisticsImpl) LanguageConnectionContext(org.apache.derby.iapi.sql.conn.LanguageConnectionContext) ConstraintDescriptor(org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor) ExecRow(org.apache.derby.iapi.sql.execute.ExecRow) TransactionController(org.apache.derby.iapi.store.access.TransactionController)
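
The long comment above lists four conditions under which a new index can share an existing conglomerate. As a reading aid, here is the same test condensed into a standalone predicate; this is an illustrative sketch with invented parameter names, not Derby API code:

public final class ShareCheck {
    /**
     * Condensed form of the share-eligibility test in executeConstantAction.
     * Parameters prefixed "existing" describe the candidate conglomerate,
     * "wanted" describes the index being created. Illustration only.
     */
    static boolean canShare(boolean existingUnique, boolean existingUniqueWithDupNulls,
                            String existingType, int[] existingCols, boolean[] existingAsc,
                            boolean wantedUnique, boolean wantedUniqueWithDupNulls,
                            boolean wantedDeferrableChecking, String wantedType,
                            int[] wantedCols, boolean[] wantedAsc) {
        // Condition 4: deferrable constraints never share a conglomerate.
        if (wantedDeferrableChecking) {
            return false;
        }
        // Condition 3a: a unique existing index can back anything;
        // a non-unique one can never back a unique index ...
        if (!existingUnique && wantedUnique) {
            return false;
        }
        // ... and for two non-unique indexes, uniqueWithDuplicateNulls must
        // be compatible (conditions 3b and 3c).
        if (!existingUnique && wantedUniqueWithDupNulls && !existingUniqueWithDupNulls) {
            return false;
        }
        // Conditions 1 and 2: same index type, same columns in the same
        // order, same ordering attributes.
        if (!existingType.equals(wantedType) || existingCols.length != wantedCols.length) {
            return false;
        }
        for (int j = 0; j < wantedCols.length; j++) {
            if (existingCols[j] != wantedCols[j] || existingAsc[j] != wantedAsc[j]) {
                return false;
            }
        }
        return true;
    }
}

Condition 3 is the subtle one: a physically non-unique conglomerate (as used for uniqueWithDuplicateNulls) may back another non-unique index, but never a truly unique one.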

Example 88 with ExecRow

use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.

the class BaseActivation method materializeResultSetIfPossible.

/* This method materializes a result set if it can actually fit in the memory
	 * specified by the "maxMemoryPerTable" system property. It converts the
	 * result set into union(union(union...(union(row, row), row), ...row), row)
	 * and returns this in-memory converted result set, or the original result
	 * set if it was not converted. See beetle 4373 for details.
	 *
	 * The optimization implemented as part of beetle 4373 can cause severe
	 * stack overflow problems; see JIRA entry DERBY-634. With the default
	 * MAX_MEMORY_PER_TABLE of 1 MB, this optimization could attempt to cache
	 * up to 250K rows as nested union results, which would overflow the stack
	 * at runtime.
	 *
	 * As Jeff mentioned in DERBY-634, the right way to optimize the original
	 * problem would have been to address subquery materialization during the
	 * optimization phase, through hash joins. Army's recent optimizer work
	 * through DERBY-781 and related changes introduced a way to materialize
	 * subquery results correctly, and it needs to be extended to cover this
	 * case. While that optimization is made more generic and stable, I propose
	 * to avoid this regression by limiting the size of the materialized result
	 * set created here to less than MAX_MEMORY_PER_TABLE and
	 * MAX_DYNAMIC_MATERIALIZED_ROWS.
	 *
	 *	@param	rs	input result set
	 *	@return	materialized result set, or the original rs if it can't be materialized
	 */
@SuppressWarnings("UseOfObsoleteCollectionType")
public NoPutResultSet materializeResultSetIfPossible(NoPutResultSet rs) throws StandardException {
    rs.openCore();
    Vector<ExecRow> rowCache = new Vector<ExecRow>();
    ExecRow aRow;
    int cacheSize = 0;
    FormatableBitSet toClone = null;
    int maxMemoryPerTable = getLanguageConnectionContext().getOptimizerFactory().getMaxMemoryPerTable();
    aRow = rs.getNextRowCore();
    if (aRow != null) {
        toClone = new FormatableBitSet(aRow.nColumns() + 1);
        toClone.set(1);
    }
    while (aRow != null) {
        cacheSize += aRow.getColumn(1).getLength();
        if (cacheSize > maxMemoryPerTable || rowCache.size() > Optimizer.MAX_DYNAMIC_MATERIALIZED_ROWS)
            break;
        rowCache.addElement(aRow.getClone(toClone));
        aRow = rs.getNextRowCore();
    }
    rs.close();
    if (aRow == null) {
        int rsNum = rs.resultSetNumber();
        int numRows = rowCache.size();
        if (numRows == 0) {
            return new RowResultSet(this, (ExecRow) null, true, rsNum, 0, 0);
        }
        RowResultSet[] rrs = new RowResultSet[numRows];
        UnionResultSet[] urs = new UnionResultSet[numRows - 1];
        for (int i = 0; i < numRows; i++) {
            rrs[i] = new RowResultSet(this, rowCache.elementAt(i), true, rsNum, 1, 0);
            if (i > 0) {
                urs[i - 1] = new UnionResultSet((i > 1) ? (NoPutResultSet) urs[i - 2] : (NoPutResultSet) rrs[0], rrs[i], this, rsNum, i + 1, 0);
            }
        }
        rs.finish();
        if (numRows == 1)
            return rrs[0];
        else
            return urs[urs.length - 1];
    }
    return rs;
}
Also used : ExecRow(org.apache.derby.iapi.sql.execute.ExecRow) FormatableBitSet(org.apache.derby.iapi.services.io.FormatableBitSet) Vector(java.util.Vector)
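
The rrs/urs loop above folds the cached rows into a left-deep tree, union(union(row0, row1), row2) and so on, so the nesting depth grows linearly with the row count; that is exactly why the MAX_DYNAMIC_MATERIALIZED_ROWS cap exists. A toy sketch of the same fold with stand-in types (none of these are Derby classes):

import java.util.List;

public final class LeftDeepUnionDemo {

    // Stand-ins for NoPutResultSet, RowResultSet and UnionResultSet.
    interface RS {}
    static final class Row implements RS {
        final Object value;
        Row(Object value) { this.value = value; }
    }
    static final class Union implements RS {
        final RS left, right;
        Union(RS left, RS right) { this.left = left; this.right = right; }
    }

    /** Fold rows into union(union(...(union(r0, r1), r2)...), rN-1). */
    static RS fold(List<Row> rows) {
        if (rows.isEmpty()) {
            // materializeResultSetIfPossible handles this case with a single
            // empty RowResultSet; a null-valued row stands in for that here.
            return new Row(null);
        }
        RS acc = rows.get(0);
        for (int i = 1; i < rows.size(); i++) {
            // Each step nests the accumulated tree on the left, so evaluating
            // the result recurses once per cached row.
            acc = new Union(acc, rows.get(i));
        }
        return acc;
    }
}

With the default caps, the union depth stays small enough that walking the tree does not overflow the stack.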

Example 89 with ExecRow

use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.

the class ConstraintConstantAction method validateConstraint.

/**
 * Evaluate a check constraint or not null column constraint.
 * Generate a query of the form
 * SELECT COUNT(*) FROM t WHERE NOT(<check constraint>)
 * and run it by compiling and executing it. It works
 * even if the table is empty and the query returns null.
 *
 * @param constraintName	constraint name
 * @param constraintText	constraint text
 * @param constraintId      constraint id
 * @param td				referenced table
 * @param lcc				the language connection context
 * @param isCheckConstraint	the constraint is a check constraint
 * @param isInitiallyDeferred {@code true} if the constraint is
 *                          initially deferred
 *
 * @return true if null constraint passes, false otherwise
 *
 * @exception StandardException if check constraint fails
 */
static boolean validateConstraint(String constraintName, String constraintText, UUID constraintId, TableDescriptor td, LanguageConnectionContext lcc, boolean isCheckConstraint, boolean isInitiallyDeferred) throws StandardException {
    StringBuilder checkStmt = new StringBuilder();
    /* should not use select sum(not(<check-predicate>) ? 1: 0) because
		 * that would generate much more complicated code and may exceed Java
		 * limits if we have a large number of check constraints, beetle 4347
		 */
    checkStmt.append("SELECT COUNT(*) FROM ");
    checkStmt.append(td.getQualifiedName());
    checkStmt.append(" WHERE NOT(");
    checkStmt.append(constraintText);
    checkStmt.append(")");
    ResultSet rs = null;
    try {
        PreparedStatement ps = lcc.prepareInternalStatement(checkStmt.toString());
        // This is a substatement; for now, we do not set any timeout
        // for it. We might change this behaviour later, by linking
        // timeout to its parent statement's timeout settings.
        rs = ps.executeSubStatement(lcc, false, 0L);
        ExecRow row = rs.getNextRow();
        if (SanityManager.DEBUG) {
            if (row == null) {
                SanityManager.THROWASSERT("did not get any rows back from query: " + checkStmt.toString());
            }
        }
        Number value = ((Number) ((NumberDataValue) row.getRowArray()[0]).getObject());
        /*
			** Value may be null if there are no rows in the
			** table.
			*/
        if ((value != null) && (value.longValue() != 0)) {
            // check constraint is violated.
            if (isCheckConstraint) {
                if (isInitiallyDeferred) {
                    // Remember the violation
                    List<UUID> violatingConstraints = new ArrayList<UUID>();
                    violatingConstraints.add(constraintId);
                    // FIXME: We don't know the row locations of the
                    // violating rows, so for now, just pretend we know one,
                    // then invalidate the row location information forcing
                    // full table check at validation time
                    CheckInfo[] newCi = new CheckInfo[1];
                    DeferredConstraintsMemory.rememberCheckViolations(
                            lcc, td.getObjectID(), td.getSchemaName(),
                            td.getName(), null, violatingConstraints,
                            new HeapRowLocation(), // dummy row location
                            newCi);
                    newCi[0].setInvalidatedRowLocations();
                } else {
                    throw StandardException.newException(SQLState.LANG_ADD_CHECK_CONSTRAINT_FAILED, constraintName, td.getQualifiedName(), value.toString());
                }
            }
            /*
             * For NOT NULL constraint violations the exception is thrown by
             * the caller. A failing check constraint never reaches this
             * point, since the exception is thrown above.
             */
            return false;
        }
    } finally {
        if (rs != null) {
            rs.close();
        }
    }
    return true;
}
Also used : HeapRowLocation(org.apache.derby.impl.store.access.heap.HeapRowLocation) ResultSet(org.apache.derby.iapi.sql.ResultSet) ExecRow(org.apache.derby.iapi.sql.execute.ExecRow) ArrayList(java.util.ArrayList) CheckInfo(org.apache.derby.impl.sql.execute.DeferredConstraintsMemory.CheckInfo) PreparedStatement(org.apache.derby.iapi.sql.PreparedStatement) UUID(org.apache.derby.catalog.UUID) NumberDataValue(org.apache.derby.iapi.types.NumberDataValue)
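
The same validation pattern can be driven from plain JDBC. A hedged sketch follows: the table name and predicate are caller-supplied placeholders, and unlike Derby's internal path it builds the statement by string concatenation, so it should only ever be fed trusted identifiers.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public final class CheckConstraintProbe {
    /**
     * Counts the rows that would violate a candidate check predicate, using
     * the same SELECT COUNT(*) ... WHERE NOT(<predicate>) shape as
     * validateConstraint. Returns 0 for an empty table.
     */
    static long countViolations(Connection conn, String qualifiedTable,
                                String checkPredicate) throws SQLException {
        String sql = "SELECT COUNT(*) FROM " + qualifiedTable
                + " WHERE NOT(" + checkPredicate + ")";
        try (PreparedStatement ps = conn.prepareStatement(sql);
             ResultSet rs = ps.executeQuery()) {
            rs.next(); // COUNT(*) always produces exactly one row
            return rs.getLong(1);
        }
    }
}

A constraint such as CHECK (salary >= 0) would be probed with countViolations(conn, "APP.EMP", "salary >= 0"); a non-zero result corresponds to the LANG_ADD_CHECK_CONSTRAINT_FAILED path above.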

Example 90 with ExecRow

use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.

the class DMLWriteResultSet method makeDeferredSparseRow.

/**
 * For deferred update, get a deferred sparse row based on the
 * deferred non-sparse row. Share the underlying columns. If there
 * is no column bit map, make them the same row.
 *
 * @param deferredBaseRow  the deferred non-sparse row
 * @param baseRowReadList  the columns to include (1-based bit map)
 * @param lcc              the language connection context
 * @return                 the deferred sparse row
 * @exception StandardException		Thrown on error
 */
protected ExecRow makeDeferredSparseRow(ExecRow deferredBaseRow, FormatableBitSet baseRowReadList, LanguageConnectionContext lcc) throws StandardException {
    ExecRow deferredSparseRow;
    if (baseRowReadList == null) {
        /* No sparse row */
        deferredSparseRow = deferredBaseRow;
    } else {
        /*
			** We need to do a fetch doing a partial row
			** read.  We need to shift our 1-based bit
			** set to a zero based bit set like the store
			** expects.
			*/
        deferredSparseRow = RowUtil.getEmptyValueRow(baseRowReadList.getLength() - 1, lcc);
        /*
			** getColumn(), setColumn(), and baseRowReadList are
			** one-based.
			*/
        int fromPosition = 1;
        for (int i = 1; i <= deferredSparseRow.nColumns(); i++) {
            if (baseRowReadList.isSet(i)) {
                deferredSparseRow.setColumn(i, deferredBaseRow.getColumn(fromPosition++));
            }
        }
    }
    return deferredSparseRow;
}
Also used : ExecRow(org.apache.derby.iapi.sql.execute.ExecRow)
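
The mapping performed by makeDeferredSparseRow is easier to see with plain arrays. A toy sketch (a boolean[] stands in for FormatableBitSet; nothing here is Derby API):

import java.util.Arrays;

public final class SparseRowDemo {
    /**
     * Mirrors makeDeferredSparseRow with plain arrays: compact holds only the
     * read columns in order; the result places them at their original 1-based
     * positions (index 0 unused), leaving unread positions null.
     */
    static Object[] toSparse(Object[] compact, boolean[] readList /* 1-based */) {
        Object[] sparse = new Object[readList.length];
        int from = 0;
        for (int i = 1; i < readList.length; i++) {
            if (readList[i]) {
                sparse[i] = compact[from++];
            }
        }
        return sparse;
    }

    public static void main(String[] args) {
        boolean[] readList = {false, true, false, true}; // columns 1 and 3 were read
        Object[] compact = {"a", "b"};
        System.out.println(Arrays.toString(toSparse(compact, readList)));
        // prints [null, a, null, b]
    }
}

As in the Derby method, the columns themselves are shared, not copied; only their positions change.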

Aggregations

ExecRow (org.apache.derby.iapi.sql.execute.ExecRow)155 DataValueDescriptor (org.apache.derby.iapi.types.DataValueDescriptor)62 ExecIndexRow (org.apache.derby.iapi.sql.execute.ExecIndexRow)34 RowLocation (org.apache.derby.iapi.types.RowLocation)27 FormatableBitSet (org.apache.derby.iapi.services.io.FormatableBitSet)23 ConglomerateController (org.apache.derby.iapi.store.access.ConglomerateController)22 ScanController (org.apache.derby.iapi.store.access.ScanController)22 SQLVarchar (org.apache.derby.iapi.types.SQLVarchar)22 SQLChar (org.apache.derby.iapi.types.SQLChar)21 SQLLongint (org.apache.derby.iapi.types.SQLLongint)21 UUID (org.apache.derby.catalog.UUID)19 Properties (java.util.Properties)12 TransactionController (org.apache.derby.iapi.store.access.TransactionController)12 CursorResultSet (org.apache.derby.iapi.sql.execute.CursorResultSet)11 ColumnDescriptor (org.apache.derby.iapi.sql.dictionary.ColumnDescriptor)10 ConglomerateDescriptor (org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor)10 DataTypeDescriptor (org.apache.derby.iapi.types.DataTypeDescriptor)10 UserType (org.apache.derby.iapi.types.UserType)9 SchemaDescriptor (org.apache.derby.iapi.sql.dictionary.SchemaDescriptor)8 ConstraintDescriptor (org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor)7