use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class DD_Version method dropSystemCatalog.
/**
 * Drop a system catalog.
 *
 * @param tc  TransactionController
 * @param crf CatalogRowFactory for the catalog to drop.
 * @exception StandardException Standard Derby error policy.
 */
protected void dropSystemCatalog(TransactionController tc, CatalogRowFactory crf) throws StandardException {
    SchemaDescriptor sd = bootingDictionary.getSystemSchemaDescriptor();
    TableDescriptor td = bootingDictionary.getTableDescriptor(crf.getCatalogName(), sd, tc);
    ConglomerateDescriptor[] cds = td.getConglomerateDescriptors();
    for (int index = 0; index < cds.length; index++) {
        tc.dropConglomerate(cds[index].getConglomerateNumber());
    }
    dropSystemCatalogDescription(tc, td);
}
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class HashJoinStrategy method divideUpPredicateLists.
/**
 * @see JoinStrategy#divideUpPredicateLists
 *
 * @exception StandardException Thrown on error
 */
public void divideUpPredicateLists(Optimizable innerTable,
                                   OptimizablePredicateList originalRestrictionList,
                                   OptimizablePredicateList storeRestrictionList,
                                   OptimizablePredicateList nonStoreRestrictionList,
                                   OptimizablePredicateList requalificationRestrictionList,
                                   DataDictionary dd) throws StandardException {
    /*
    ** If we are walking a non-covering index, then all predicates that
    ** get evaluated in the HashScanResultSet, whether during the building
    ** or probing of the hash table, need to be evaluated at both the
    ** IndexRowToBaseRowResultSet and the HashScanResultSet to ensure
    ** that the rows materialized into the hash table still qualify when
    ** we go to read the row from the heap. This also includes predicates
    ** that are not qualifier/start/stop keys (hence not in the store/non-store
    ** lists).
    */
    originalRestrictionList.copyPredicatesToOtherList(requalificationRestrictionList);
    ConglomerateDescriptor cd = innerTable.getTrulyTheBestAccessPath().getConglomerateDescriptor();
    /* For the inner table of a hash join, divide up the predicates:
     *
     * o restrictionList - predicates that get applied when creating
     *                     the hash table (single table clauses)
     *
     * o nonBaseTableRestrictionList
     *                   - those that get applied when probing into the
     *                     hash table (equijoin clauses on key columns,
     *                     ordered by key column position first, followed
     *                     by any other join predicates; all predicates
     *                     in this list are qualifiers which can be
     *                     evaluated in the store)
     *
     * o baseTableRL     - only applicable if this is not a covering
     *                     index. In that case, we will need to
     *                     requalify the data page, so this list
     *                     will include all predicates.
     */
    // Build the list to be applied when creating the hash table
    originalRestrictionList.transferPredicates(storeRestrictionList, innerTable.getReferencedTableMap(), innerTable);
    /*
     * Eliminate any non-qualifiers that may have been pushed, but
     * are redundant and not useful for hash join.
     *
     * For instance, "in" (or another non-qualifier) may have been pushed
     * down as a start/stop key, but for hash join it may no longer be one,
     * because a previous key column may have been disqualified (e.g., by
     * correlation). We simply remove such a non-qualifier ("in") because
     * we left it as a residual predicate anyway. It's easier/safer to
     * filter it out here than to detect it earlier (and not push it down).
     * Beetle 4316.
     *
     * Can't filter out an OR list, as it is not a residual predicate.
     */
    for (int i = storeRestrictionList.size() - 1; i >= 0; i--) {
        Predicate p1 = (Predicate) storeRestrictionList.getOptPredicate(i);
        if (!p1.isStoreQualifier() && !p1.isStartKey() && !p1.isStopKey()) {
            storeRestrictionList.removeOptPredicate(i);
        }
    }
    for (int i = originalRestrictionList.size() - 1; i >= 0; i--) {
        Predicate p1 = (Predicate) originalRestrictionList.getOptPredicate(i);
        if (!p1.isStoreQualifier())
            originalRestrictionList.removeOptPredicate(i);
    }
    /* Copy the rest of the predicates to the non-store list */
    originalRestrictionList.copyPredicatesToOtherList(nonStoreRestrictionList);
    /* If innerTable is a ProjectRestrictNode, we need to use its child
     * to find the hash key columns. This is because the ProjectRestrictNode
     * may not have every result column of the underlying node as a result
     * column, and the predicate's column reference was bound to the
     * underlying node's column position. We still have to pass the
     * ProjectRestrictNode rather than the underlying node to this method,
     * because a predicate's referencedTableMap references the table number
     * of the ProjectRestrictNode, and we need this info to see whether
     * a predicate in storeRestrictionList can be pushed down.
     * Beetle 3458.
     */
    Optimizable hashTableFor = innerTable;
    if (innerTable instanceof ProjectRestrictNode) {
        ProjectRestrictNode prn = (ProjectRestrictNode) innerTable;
        if (prn.getChildResult() instanceof Optimizable)
            hashTableFor = (Optimizable) (prn.getChildResult());
    }
    int[] hashKeyColumns = findHashKeyColumns(hashTableFor, cd, nonStoreRestrictionList);
    if (hashKeyColumns != null) {
        innerTable.setHashKeyColumns(hashKeyColumns);
    } else {
        String name;
        if (cd != null && cd.isIndex()) {
            name = cd.getConglomerateName();
        } else {
            name = innerTable.getBaseTableName();
        }
        throw StandardException.newException(SQLState.LANG_HASH_NO_EQUIJOIN_FOUND, name, innerTable.getBaseTableName());
    }
    // Mark all of the predicates in the probe list as qualifiers
    nonStoreRestrictionList.markAllPredicatesQualifiers();
    int[] conglomColumn = new int[hashKeyColumns.length];
    if (cd != null && cd.isIndex()) {
        /*
        ** If the conglomerate is an index, get the column numbers of the
        ** hash keys in the base heap.
        */
        for (int index = 0; index < hashKeyColumns.length; index++) {
            conglomColumn[index] = cd.getIndexDescriptor().baseColumnPositions()[hashKeyColumns[index]];
        }
    } else {
        /*
        ** If the conglomerate is a heap, the column numbers of the hash
        ** key are the column numbers returned by findHashKeyColumns().
        **
        ** NOTE: Must switch from zero-based to one-based.
        */
        for (int index = 0; index < hashKeyColumns.length; index++) {
            conglomColumn[index] = hashKeyColumns[index] + 1;
        }
    }
    /* Put the equality predicates on the hash key columns first.
     * (The column number is conglomColumn[index] from above.)
     */
    for (int index = hashKeyColumns.length - 1; index >= 0; index--) {
        nonStoreRestrictionList.putOptimizableEqualityPredicateFirst(innerTable, conglomColumn[index]);
    }
}
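The index/heap branch near the end of the method is the part that most often needs a second read: hash key positions refer to the row being scanned, so for an index scan they must be translated through baseColumnPositions(), while for a heap scan they only need the shift from zero-based to Derby's one-based column numbering. A small standalone sketch of that mapping; mapHashKeys is an illustrative name, not a Derby method:

import java.util.Arrays;

public class HashKeyMappingSketch {

    // baseColumnPositions == null models a heap scan; otherwise it models
    // an index scan, where hash key i indexes into the index's base columns.
    static int[] mapHashKeys(int[] hashKeyColumns, int[] baseColumnPositions) {
        int[] conglomColumn = new int[hashKeyColumns.length];
        for (int i = 0; i < hashKeyColumns.length; i++) {
            conglomColumn[i] = (baseColumnPositions != null)
                ? baseColumnPositions[hashKeyColumns[i]]  // index scan: look up base position
                : hashKeyColumns[i] + 1;                  // heap scan: zero-based -> one-based
        }
        return conglomColumn;
    }

    public static void main(String[] args) {
        // Index on base columns (3, 1): hash keys 0 and 1 map to base columns 3 and 1.
        System.out.println(Arrays.toString(mapHashKeys(new int[] {0, 1}, new int[] {3, 1})));
        // Heap scan: zero-based key columns 0 and 2 become base columns 1 and 3.
        System.out.println(Arrays.toString(mapHashKeys(new int[] {0, 2}, null)));
    }
}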
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class RenameNode method bindStatement.
// We inherit the generate() method from DDLStatementNode.
/**
 * Bind this node. This means doing any static error checking that
 * can be done before actually renaming the table/column/index.
 *
 * For a table rename: looking up the from table, verifying it exists,
 * verifying it's not a system table, verifying it's not a view,
 * and looking up the to table, verifying it doesn't exist.
 *
 * For a column rename: looking up the table, verifying it exists,
 * verifying it's not a system table, verifying it's not a view, verifying
 * the from column exists, verifying the to column doesn't exist.
 *
 * For an index rename: looking up the table, verifying it exists,
 * verifying it's not a system table, verifying it's not a view, verifying
 * the from index exists, verifying the to index doesn't exist.
 *
 * @exception StandardException Thrown on error
 */
@Override
public void bindStatement() throws StandardException {
    CompilerContext cc = getCompilerContext();
    DataDictionary dd = getDataDictionary();
    ConglomerateDescriptor cd;
    SchemaDescriptor sd;
    /* In the case of rename index, the only thing we get from the parser
     * is the current and new index names, with no information about the
     * table the index belongs to. This is because index names are unique
     * within a schema, and hence there is no need to qualify an index
     * name with a table name (which we have to do for rename column).
     * But from the index name, using the data dictionary, you can
     * find the table it belongs to. Since most of the checking
     * in bind is done using the table descriptor, in the following if
     * statement we are trying to get the table information from the
     * index name so it is available for the rest of the bind code.
     */
    TableName baseTable;
    if (renamingWhat == StatementType.RENAME_INDEX) {
        sd = getSchemaDescriptor((String) null);
        ConglomerateDescriptor indexDescriptor = dd.getConglomerateDescriptor(oldObjectName, sd, false);
        if (indexDescriptor == null)
            throw StandardException.newException(SQLState.LANG_INDEX_NOT_FOUND, oldObjectName);
        /* Get the table descriptor */
        td = dd.getTableDescriptor(indexDescriptor.getTableID());
        initAndCheck(makeTableName(td.getSchemaName(), td.getName()));
    } else {
        sd = getSchemaDescriptor();
    }
    td = getTableDescriptor();
    // Throw an exception if the user is attempting to rename a temporary table.
    if (td.getTableType() == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
        throw StandardException.newException(SQLState.LANG_NOT_ALLOWED_FOR_DECLARED_GLOBAL_TEMP_TABLE);
    }
    switch (this.renamingWhat) {
        case StatementType.RENAME_TABLE:
            /* Verify that the new table name does not exist in the database */
            TableDescriptor tabDesc = getTableDescriptor(newObjectName, sd);
            if (tabDesc != null)
                throw descriptorExistsException(tabDesc, sd);
            renameTableBind(dd);
            break;
        case StatementType.RENAME_COLUMN:
            renameColumnBind(dd);
            break;
        case StatementType.RENAME_INDEX:
            ConglomerateDescriptor conglomDesc = dd.getConglomerateDescriptor(newObjectName, sd, false);
            if (conglomDesc != null)
                throw descriptorExistsException(conglomDesc, sd);
            break;
        default:
            if (SanityManager.DEBUG)
                SanityManager.THROWASSERT("Unexpected rename action in RenameNode");
            break;
    }
    conglomerateNumber = td.getHeapConglomerateId();
    /* Get the base conglomerate descriptor */
    cd = td.getConglomerateDescriptor(conglomerateNumber);
    /* The statement is dependent on the TableDescriptor and ConglomerateDescriptor */
    cc.createDependency(td);
    cc.createDependency(cd);
}
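The rename-index branch relies on index names being unique within a schema, which lets the table be resolved from the index name alone. A minimal sketch of that lookup, using a plain map as a hypothetical stand-in for the data dictionary (tableForIndex is illustrative, not a Derby method):

import java.util.Collections;
import java.util.Map;

public class RenameIndexLookupSketch {

    // Resolve the table a schema-unique index belongs to, mirroring the
    // dd.getConglomerateDescriptor(oldObjectName, sd, false) lookup above.
    static String tableForIndex(Map<String, String> indexToTable, String schema, String indexName) {
        String table = indexToTable.get(schema + "." + indexName);
        if (table == null) {
            // Analogue of the LANG_INDEX_NOT_FOUND error thrown above.
            throw new IllegalArgumentException("index not found: " + indexName);
        }
        return table;
    }

    public static void main(String[] args) {
        Map<String, String> dictionary = Collections.singletonMap("APP.EMP_IDX", "EMPLOYEES");
        System.out.println(tableForIndex(dictionary, "APP", "EMP_IDX")); // EMPLOYEES
    }
}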
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class TableNameInfo method getTableName.
public String getTableName(Long conglomId) {
    if (conglomId == null)
        return "?";
    // See if we have already seen this conglomerate.
    TableDescriptor td = tableCache.get(conglomId);
    if (td == null) {
        // First time we see this conglomerate: get it from the ddCache.
        ConglomerateDescriptor cd = ddCache.get(conglomId);
        if (cd != null) {
            // The conglomerate descriptor is not null, so this table is
            // known to the data dictionary.
            td = tdCache.get(cd.getTableID());
        }
        if ((cd == null) || (td == null)) {
            String name;
            // The conglomerate is not known to the data dictionary.
            if (conglomId.longValue() > 20) {
                // Table probably dropped!
                name = "*** TRANSIENT_" + conglomId;
            } else {
                // One of the internal tables -- HACK!!
                switch (conglomId.intValue()) {
                    case 0:
                        name = "*** INVALID CONGLOMERATE ***";
                        break;
                    case 1:
                        name = "ConglomerateDirectory";
                        break;
                    case 2:
                        name = "PropertyConglomerate";
                        break;
                    default:
                        name = "*** INTERNAL TABLE " + conglomId;
                        break;
                }
            }
            return name;
        }
        tableCache.put(conglomId, td);
        if ((indexCache != null) && cd.isIndex())
            indexCache.put(conglomId, cd.getConglomerateName());
    }
    return td.getName();
}
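The method is essentially a cache-aside lookup with fixed fallbacks for conglomerates the dictionary does not know about. Below is a compact, runnable sketch of the same pattern; the maps are plain stand-ins for Derby's descriptor caches, while the threshold of 20 and the special ids 0-2 are taken directly from the method above:

import java.util.HashMap;
import java.util.Map;

public class TableNameCacheSketch {

    private final Map<Long, String> nameCache = new HashMap<>();
    private final Map<Long, String> dictionary; // conglomId -> table name

    TableNameCacheSketch(Map<Long, String> dictionary) {
        this.dictionary = dictionary;
    }

    String getTableName(Long conglomId) {
        if (conglomId == null)
            return "?";
        String cached = nameCache.get(conglomId);
        if (cached != null)
            return cached;
        String known = dictionary.get(conglomId);
        if (known != null) {
            // Only dictionary hits are cached, mirroring the method above.
            nameCache.put(conglomId, known);
            return known;
        }
        if (conglomId.longValue() > 20)
            return "*** TRANSIENT_" + conglomId; // table probably dropped
        switch (conglomId.intValue()) {
            case 0: return "*** INVALID CONGLOMERATE ***";
            case 1: return "ConglomerateDirectory";
            case 2: return "PropertyConglomerate";
            default: return "*** INTERNAL TABLE " + conglomId;
        }
    }

    public static void main(String[] args) {
        Map<Long, String> dict = new HashMap<>();
        dict.put(1552L, "EMPLOYEES");
        TableNameCacheSketch names = new TableNameCacheSketch(dict);
        System.out.println(names.getTableName(1552L)); // EMPLOYEES
        System.out.println(names.getTableName(2L));    // PropertyConglomerate
        System.out.println(names.getTableName(9999L)); // *** TRANSIENT_9999
    }
}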
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class FromBaseTable method getScanArguments.
private int getScanArguments(ExpressionClassBuilder acb, MethodBuilder mb) throws StandardException {
    // Put the result row template in the saved objects.
    int resultRowTemplate = acb.addItem(getResultColumns().buildRowTemplate(referencedCols, false));
    // Pass in the referenced columns on the saved objects chain.
    int colRefItem = -1;
    if (referencedCols != null) {
        colRefItem = acb.addItem(referencedCols);
    }
    // Beetle entry 3865: updatable cursor using an index.
    int indexColItem = -1;
    if (isCursorTargetTable() || getUpdateLocks) {
        ConglomerateDescriptor cd = getTrulyTheBestAccessPath().getConglomerateDescriptor();
        if (cd.isIndex()) {
            int[] baseColPos = cd.getIndexDescriptor().baseColumnPositions();
            boolean[] isAscending = cd.getIndexDescriptor().isAscending();
            int[] indexCols = new int[baseColPos.length];
            for (int i = 0; i < indexCols.length; i++) {
                indexCols[i] = isAscending[i] ? baseColPos[i] : -baseColPos[i];
            }
            indexColItem = acb.addItem(indexCols);
        }
    }
    AccessPath ap = getTrulyTheBestAccessPath();
    JoinStrategy trulyTheBestJoinStrategy = ap.getJoinStrategy();
    /*
    ** We can only do bulkFetch on NESTEDLOOP.
    */
    if (SanityManager.DEBUG) {
        if ((!trulyTheBestJoinStrategy.bulkFetchOK()) && (bulkFetch != UNSET)) {
            SanityManager.THROWASSERT("bulkFetch should not be set " + "for the join strategy " + trulyTheBestJoinStrategy.getName());
        }
    }
    int nargs = trulyTheBestJoinStrategy.getScanArgs(
            getLanguageConnectionContext().getTransactionCompile(),
            mb,
            this,
            storeRestrictionList,
            nonStoreRestrictionList,
            acb,
            bulkFetch,
            resultRowTemplate,
            colRefItem,
            indexColItem,
            getTrulyTheBestAccessPath().getLockMode(),
            (tableDescriptor.getLockGranularity() == TableDescriptor.TABLE_LOCK_GRANULARITY),
            getCompilerContext().getScanIsolationLevel(),
            ap.getOptimizer().getMaxMemoryPerTable(),
            multiProbing);
    return nargs;
}
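The indexCols array built above encodes each index column as its base-table position, negated when the index orders that column descending. A standalone sketch of that sign encoding, with illustrative names only (encode is not a Derby method):

import java.util.Arrays;

public class IndexColumnEncodingSketch {

    // Encode index columns as signed base-table positions: positive for
    // ascending order, negative for descending, as in getScanArguments above.
    static int[] encode(int[] baseColPos, boolean[] isAscending) {
        int[] indexCols = new int[baseColPos.length];
        for (int i = 0; i < indexCols.length; i++) {
            indexCols[i] = isAscending[i] ? baseColPos[i] : -baseColPos[i];
        }
        return indexCols;
    }

    public static void main(String[] args) {
        // An index on base columns (2 ASC, 5 DESC) encodes as [2, -5].
        System.out.println(Arrays.toString(encode(new int[] {2, 5}, new boolean[] {true, false})));
    }
}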