Use of org.apache.derby.catalog.UUID in project derby by apache.
The class CreateViewConstantAction, method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for CREATE VIEW.
*
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
UUID toid;
ColumnDescriptor columnDescriptor;
ViewDescriptor vd;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
SchemaDescriptor sd = DDLConstantAction.getSchemaDescriptorForCreate(dd, activation, schemaName);
/* Create a new table descriptor.
* (Pass in row locking, even though meaningless for views.)
*/
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
td = ddg.newTableDescriptor(tableName, sd, tableType, TableDescriptor.ROW_LOCK_GRANULARITY);
dd.addDescriptor(td, sd, DataDictionary.SYSTABLES_CATALOG_NUM, false, tc);
toid = td.getUUID();
// for each column, stuff system.column
ColumnDescriptor[] cdlArray = new ColumnDescriptor[columnInfo.length];
int index = 1;
for (int ix = 0; ix < columnInfo.length; ix++) {
columnDescriptor = new ColumnDescriptor(columnInfo[ix].name, index++, columnInfo[ix].dataType,
columnInfo[ix].defaultValue, columnInfo[ix].defaultInfo, td, (UUID) null,
columnInfo[ix].autoincStart, columnInfo[ix].autoincInc, columnInfo[ix].autoincCycle);
cdlArray[ix] = columnDescriptor;
}
dd.addDescriptorArray(cdlArray, td, DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc);
// add columns to the column descriptor list.
ColumnDescriptorList cdl = td.getColumnDescriptorList();
for (int i = 0; i < cdlArray.length; i++) cdl.add(cdlArray[i]);
/* Get and add a view descriptor */
vd = ddg.newViewDescriptor(toid, tableName, viewText, checkOption,
(compSchemaId == null) ? lcc.getDefaultSchema().getUUID() : compSchemaId);
for (int ix = 0; ix < providerInfo.length; ix++) {
/* We should always be able to find the Provider */
Provider provider = (Provider) providerInfo[ix].getDependableFinder().getDependable(dd, providerInfo[ix].getObjectId());
dm.addDependency(vd, provider, lcc.getContextManager());
}
// store the view's dependencies on various privileges in the dependency system
storeViewTriggerDependenciesOnPrivileges(activation, vd);
dd.addDescriptor(vd, sd, DataDictionary.SYSVIEWS_CATALOG_NUM, true, tc);
}
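For context, a minimal JDBC sketch of what reaches this method (the database and object names demoDb, T1, and V1 are illustrative, and it assumes Derby's embedded driver is on the classpath): executing CREATE VIEW is compiled into a CreateViewConstantAction, and the UUID obtained from td.getUUID() above is what ties the view's SYSCOLUMNS and SYSVIEWS rows back to its SYSTABLES row.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateViewDemo {
    public static void main(String[] args) throws Exception {
        // In-memory database, created on first connect.
        try (Connection c = DriverManager.getConnection("jdbc:derby:memory:demoDb;create=true");
             Statement s = c.createStatement()) {
            s.executeUpdate("CREATE TABLE T1 (ID INT, NAME VARCHAR(20))");
            // This DDL is compiled into a CreateViewConstantAction, whose
            // executeConstantAction runs at execution time as shown above.
            s.executeUpdate("CREATE VIEW V1 AS SELECT ID FROM T1");
        }
    }
}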
Use of org.apache.derby.catalog.UUID in project derby by apache.
The class DDLConstantAction, method addColumnDependencies.
/**
* Add dependencies of a column on providers. These can arise if a generated column depends
* on a user-created function.
*/
protected void addColumnDependencies(LanguageConnectionContext lcc, DataDictionary dd, TableDescriptor td, ColumnInfo ci) throws StandardException {
ProviderInfo[] providers = ci.providers;
if (providers != null) {
DependencyManager dm = dd.getDependencyManager();
ContextManager cm = lcc.getContextManager();
int providerCount = providers.length;
ColumnDescriptor cd = td.getColumnDescriptor(ci.name);
DefaultDescriptor defDesc = cd.getDefaultDescriptor(dd);
for (int px = 0; px < providerCount; px++) {
ProviderInfo pi = providers[px];
DependableFinder finder = pi.getDependableFinder();
UUID providerID = pi.getObjectId();
Provider provider = (Provider) finder.getDependable(dd, providerID);
dm.addDependency(defDesc, provider, cm);
}
// end loop through providers
}
}
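The javadoc's scenario can be set up from plain SQL. A hedged sketch (the database, function, and table names are invented; it relies on Derby's documented requirement that functions referenced from a generation clause be DETERMINISTIC and not read SQL data): creating T2 below is what causes addColumnDependencies to record a dependency of generated column C's DefaultDescriptor on the user-created function MAXI.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class GeneratedColumnDemo {
    public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection("jdbc:derby:memory:genDb;create=true");
             Statement s = c.createStatement()) {
            // A user-created function backed by an existing static Java method.
            s.executeUpdate("CREATE FUNCTION MAXI(A INT, B INT) RETURNS INT"
                    + " LANGUAGE JAVA PARAMETER STYLE JAVA DETERMINISTIC NO SQL"
                    + " EXTERNAL NAME 'java.lang.Math.max'");
            // The generation clause references MAXI, so the generated column's
            // default descriptor must depend on the function (its provider).
            s.executeUpdate("CREATE TABLE T2 (A INT, B INT,"
                    + " C INT GENERATED ALWAYS AS (MAXI(A, B)))");
        }
    }
}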
Use of org.apache.derby.catalog.UUID in project derby by apache.
The class InsertResultSet, method emptyIndexes.
/**
* Empty the indexes after doing a bulk insert replace
* where the table has 0 rows after the replace.
* RESOLVE: This method is ugly! Prior to 2.0, we simply
* scanned back across the table to build the indexes. We
* changed this in 2.0 to populate the sorters via a call back
* as we populated the table. Doing a 0 row replace into a
* table with indexes is a degenerate case, hence we allow
* ugly and unoptimized code.
*
* @exception StandardException Thrown on failure
*/
private void emptyIndexes(long newHeapConglom, InsertConstantAction constants, TableDescriptor td, DataDictionary dd, ExecRow fullTemplate) throws StandardException {
int numIndexes = constants.irgs.length;
ExecIndexRow[] idxRows = new ExecIndexRow[numIndexes];
ExecRow baseRows;
ColumnOrdering[][] order = new ColumnOrdering[numIndexes][];
int numColumns = td.getNumberOfColumns();
collation = new int[numIndexes][];
// Create the BitSet for mapping the partial row to the full row
FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
// Need to check each index for referenced columns
int numReferencedColumns = 0;
for (int index = 0; index < numIndexes; index++) {
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
for (int bcp = 0; bcp < baseColumnPositions.length; bcp++) {
if (!bitSet.get(baseColumnPositions[bcp])) {
bitSet.set(baseColumnPositions[bcp]);
numReferencedColumns++;
}
}
}
// We can finally create the partial base row
baseRows = activation.getExecutionFactory().getValueRow(numReferencedColumns);
// Fill in each base row with nulls of the correct data type
int colNumber = 0;
for (int index = 0; index < numColumns; index++) {
if (bitSet.get(index + 1)) {
colNumber++;
// NOTE: 1-based column numbers
baseRows.setColumn(colNumber, fullTemplate.getColumn(index + 1).cloneValue(false));
}
}
needToDropSort = new boolean[numIndexes];
sortIds = new long[numIndexes];
/* Do the initial set up before scanning the heap.
* For each index, build a single index row and a sorter.
*/
for (int index = 0; index < numIndexes; index++) {
// create a single index row template for each index
idxRows[index] = constants.irgs[index].getIndexRowTemplate();
// Get an index row based on the base row
// (This call is only necessary here because we need to pass a
// template to the sorter.)
constants.irgs[index].getIndexRow(baseRows, rl, idxRows[index], bitSet);
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate keys.
*/
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
boolean[] isAscending = constants.irgs[index].isAscending();
int numColumnOrderings;
SortObserver sortObserver;
final IndexRowGenerator indDes = cd.getIndexDescriptor();
if (indDes.isUnique() || indDes.isUniqueDeferrable()) {
numColumnOrderings = indDes.isUnique() ? baseColumnPositions.length : baseColumnPositions.length + 1;
String indexOrConstraintName = cd.getConglomerateName();
boolean deferred = false;
boolean uniqueDeferrable = false;
UUID uniqueDeferrableConstraintId = null;
if (cd.isConstraint()) {
// so, the index is backing up a constraint
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
deferred = lcc.isEffectivelyDeferred(lcc.getCurrentSQLSessionContext(activation), conDesc.getUUID());
uniqueDeferrable = conDesc.deferrable();
uniqueDeferrableConstraintId = conDesc.getUUID();
}
sortObserver = new UniqueIndexSortObserver(lcc, uniqueDeferrableConstraintId, false, // don't clone rows
uniqueDeferrable, deferred, indexOrConstraintName, idxRows[index], true, td.getName());
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(false, false, idxRows[index], true);
}
order[index] = new ColumnOrdering[numColumnOrderings];
for (int ii = 0; ii < isAscending.length; ii++) {
order[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
}
if (numColumnOrderings > isAscending.length) {
order[index][isAscending.length] = new IndexColumnOrder(isAscending.length);
}
// create the sorters
sortIds[index] = tc.createSort((Properties) null, idxRows[index].getRowArrayClone(), order[index], sortObserver,
false, // not in order
rowCount, // est rows
-1); // est row size, -1 means no idea
needToDropSort[index] = true;
}
// Populate sorters and get the output of each sorter into a row
// source. The sorters have the indexed columns only and the columns
// are in the correct order.
rowSources = new RowLocationRetRowSource[numIndexes];
// Fill in the RowSources
SortController[] sorter = new SortController[numIndexes];
for (int index = 0; index < numIndexes; index++) {
sorter[index] = tc.openSort(sortIds[index]);
sorter[index].completedInserts();
rowSources[index] = tc.openSortRowSource(sortIds[index]);
}
long[] newIndexCongloms = new long[numIndexes];
// Populate each index
for (int index = 0; index < numIndexes; index++) {
ConglomerateController indexCC;
Properties properties = new Properties();
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
// Build the properties list for the new conglomerate
indexCC = tc.openCompiledConglomerate(false, TransactionController.OPENMODE_FORUPDATE,
TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE,
constants.indexSCOCIs[index], indexDCOCIs[index]);
// Get the properties on the old index
indexCC.getInternalTablePropertySet(properties);
/* Create the properties that language supplies when creating
* the index. (The store doesn't preserve these.)
*/
int indexRowLength = idxRows[index].nColumns();
properties.put("baseConglomerateId", Long.toString(newHeapConglom));
if (cd.getIndexDescriptor().isUnique()) {
properties.put("nUniqueColumns", Integer.toString(indexRowLength - 1));
} else {
properties.put("nUniqueColumns", Integer.toString(indexRowLength));
}
if (cd.getIndexDescriptor().isUniqueWithDuplicateNulls() && !cd.getIndexDescriptor().hasDeferrableChecking()) {
properties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
}
properties.put("rowLocationColumn", Integer.toString(indexRowLength - 1));
properties.put("nKeyFields", Integer.toString(indexRowLength));
indexCC.close();
collation[index] = constants.irgs[index].getColumnCollationIds(td.getColumnDescriptorList());
// We can finally drain the sorter and rebuild the index
// Populate the index.
newIndexCongloms[index] = tc.createAndLoadConglomerate("BTREE", idxRows[index].getRowArray(),
null, // default column sort order
collation[index], properties, TransactionController.IS_DEFAULT, rowSources[index], (long[]) null);
/* Update the DataDictionary
*
* Update sys.sysconglomerates with the new conglomerate number. If the
* conglomerate is shared by duplicate indexes, all the descriptors
* for those indexes need to be updated with the new number.
*/
dd.updateConglomerateDescriptor(td.getConglomerateDescriptors(constants.indexCIDS[index]), newIndexCongloms[index], tc);
// Drop the old conglomerate
tc.dropConglomerate(constants.indexCIDS[index]);
}
}
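The degenerate case the comment describes can be reproduced from SQL. A hedged sketch (the names and the path /tmp/empty.csv are illustrative, and an empty import file is assumed to exist there): importing with the replace flag performs a bulk insert replace, and when the file contributes 0 rows, emptyIndexes is what rebuilds the table's indexes as empty conglomerates.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ZeroRowReplaceDemo {
    public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection("jdbc:derby:memory:impDb;create=true");
             Statement s = c.createStatement()) {
            s.executeUpdate("CREATE TABLE T3 (ID INT PRIMARY KEY, NAME VARCHAR(20))");
            s.executeUpdate("INSERT INTO T3 VALUES (1, 'one')");
            // REPLACE mode (final argument = 1) deletes the existing rows and
            // bulk-loads the file; an empty file leaves 0 rows, so the indexes
            // are rebuilt empty instead of being repopulated from a table scan.
            s.execute("CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE("
                    + "'APP', 'T3', '/tmp/empty.csv', NULL, NULL, NULL, 1)");
        }
    }
}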
Use of org.apache.derby.catalog.UUID in project derby by apache.
The class DropTableConstantAction, method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for DROP TABLE.
*
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
UUID tableID;
ConglomerateDescriptor[] cds;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
if ((sd != null) && sd.getSchemaName().equals(SchemaDescriptor.STD_DECLARED_GLOBAL_TEMPORARY_TABLES_SCHEMA_NAME)) {
// check if this is a temp table before checking data dictionary
td = lcc.getTableDescriptorForDeclaredGlobalTempTable(tableName);
// td null here means it is not a temporary table; look for the table in the physical SESSION schema
if (td == null)
td = dd.getTableDescriptor(tableName, sd, tc);
// td null now means tableName is not a temp table and it is not a physical table in the SESSION schema
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, fullTableName);
}
if (td.getTableType() == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
dm.invalidateFor(td, DependencyManager.DROP_TABLE, lcc);
tc.dropConglomerate(td.getHeapConglomerateId());
lcc.dropDeclaredGlobalTempTable(tableName);
return;
}
}
/* Lock the table before we access the data dictionary
* to prevent deadlocks.
*
* Note that for DROP TABLE replayed at Targets during REFRESH,
* the conglomerateNumber will be 0. That's ok. During REFRESH,
* we don't need to lock the conglomerate.
*/
if (conglomerateNumber != 0) {
lockTableForDDL(tc, conglomerateNumber, true);
}
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
/* Get the table descriptor. */
td = dd.getTableDescriptor(tableId);
if (td == null) {
throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, fullTableName);
}
/* Get an exclusive table lock on the table. */
long heapId = td.getHeapConglomerateId();
lockTableForDDL(tc, heapId, true);
/* Drop the triggers */
for (TriggerDescriptor trd : dd.getTriggerDescriptors(td)) {
trd.drop(lcc);
}
/* Drop all defaults */
ColumnDescriptorList cdl = td.getColumnDescriptorList();
for (ColumnDescriptor cd : cdl) {
// Drop the identity sequence backing an autoincrement column (10.11 dictionaries and later)
if (cd.isAutoincrement() && dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_11, null)) {
dropIdentitySequence(dd, td, activation);
}
// If the column has a default, clear the default's dependencies
if (cd.getDefaultInfo() != null) {
DefaultDescriptor defaultDesc = cd.getDefaultDescriptor(dd);
dm.clearDependencies(lcc, defaultDesc);
}
}
/* Drop the columns */
dd.dropAllColumnDescriptors(tableId, tc);
/* Drop all table and column permission descriptors */
dd.dropAllTableAndColPermDescriptors(tableId, tc);
/* Drop the constraints */
dropAllConstraintDescriptors(td, activation);
/*
** Drop all the conglomerates. Drop the heap last, because the
** store needs it for locking the indexes when they are dropped.
*/
cds = td.getConglomerateDescriptors();
long[] dropped = new long[cds.length - 1];
int numDropped = 0;
for (int index = 0; index < cds.length; index++) {
ConglomerateDescriptor cd = cds[index];
/* if it's for an index, since similar indexes share one
* conglomerate, we only drop the conglomerate once
*/
if (cd.getConglomerateNumber() != heapId) {
long thisConglom = cd.getConglomerateNumber();
int i;
for (i = 0; i < numDropped; i++) {
if (dropped[i] == thisConglom)
break;
}
// not dropped yet
if (i == numDropped) {
dropped[numDropped++] = thisConglom;
tc.dropConglomerate(thisConglom);
dd.dropStatisticsDescriptors(td.getUUID(), cd.getUUID(), tc);
}
}
}
/* Prepare all dependents to invalidate. (This is their chance
* to say that they can't be invalidated. For example, an open
* cursor referencing a table/view that the user is attempting to
* drop.) If no one objects, then invalidate any dependent objects.
* We check for invalidation before we drop the table descriptor
* since the table descriptor may be looked up as part of
* decoding tuples in SYSDEPENDS.
*/
dm.invalidateFor(td, DependencyManager.DROP_TABLE, lcc);
//
// The table itself can depend on the user defined types of its columns.
// Drop all of those dependencies now.
//
adjustUDTDependencies(lcc, dd, td, null, true);
/* Drop the table */
dd.dropTableDescriptor(td, sd, tc);
/* Drop the conglomerate descriptors */
dd.dropAllConglomerateDescriptors(td, tc);
/* Drop the store element last, to prevent a dangling reference
* for an open cursor; beetle 4393.
*/
tc.dropConglomerate(heapId);
}
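Both branches of the method are reachable from plain SQL. A minimal sketch with invented names: the first DROP takes the early-return branch for declared global temporary tables, while the second walks the full teardown of triggers, defaults, constraints, and conglomerates.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropTableDemo {
    public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection("jdbc:derby:memory:dropDb;create=true");
             Statement s = c.createStatement()) {
            // Declared global temp tables live in the SESSION schema and are
            // dropped via the early-return branch of executeConstantAction.
            s.executeUpdate("DECLARE GLOBAL TEMPORARY TABLE SESSION.TMP (ID INT)"
                    + " ON COMMIT PRESERVE ROWS NOT LOGGED");
            s.executeUpdate("DROP TABLE SESSION.TMP");
            // A regular table takes the full path: invalidate dependents, drop
            // triggers/defaults/constraints, then drop the conglomerates.
            s.executeUpdate("CREATE TABLE T4 (ID INT PRIMARY KEY)");
            s.executeUpdate("DROP TABLE T4");
        }
    }
}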
Use of org.apache.derby.catalog.UUID in project derby by apache.
The class JarUtil, method add.
/**
* Add a jar file to the current connection's database.
*
* <P> The reason for adding the jar file in this private instance
* method is that it allows us to share setup logic with drop and
* replace.
* @param is A stream for reading the content of the file to add.
* @exception StandardException Oops
*/
private long add(final InputStream is) throws StandardException {
// Like CREATE TABLE, we say we are writing before we read the dd
dd.startWriting(lcc);
FileInfoDescriptor fid = getInfo();
if (fid != null)
throw StandardException.newException(SQLState.LANG_OBJECT_ALREADY_EXISTS_IN_OBJECT, fid.getDescriptorType(), sqlName, fid.getSchemaDescriptor().getDescriptorType(), schemaName);
SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, null, true);
try {
notifyLoader(false);
dd.invalidateAllSPSPlans();
UUID id = BaseActivation.getMonitor().getUUIDFactory().createUUID();
final String jarExternalName = JarUtil.mkExternalName(id, schemaName, sqlName, fr.getSeparatorChar());
long generationId = setJar(jarExternalName, is, true, 0L);
fid = ddg.newFileInfoDescriptor(id, sd, sqlName, generationId);
dd.addDescriptor(fid, sd, DataDictionary.SYSFILES_CATALOG_NUM, false, lcc.getTransactionExecute());
return generationId;
} finally {
notifyLoader(true);
}
}
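From the application side, this method sits behind the SQLJ.INSTALL_JAR system procedure; the freshly created UUID ends up in the jar's external on-disk name and keys its SYSFILES row. A sketch, assuming a jar actually exists at the given path (the path and jar name are illustrative):

import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;

public class InstallJarDemo {
    public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection("jdbc:derby:memory:jarDb;create=true");
             CallableStatement cs = c.prepareCall("CALL SQLJ.INSTALL_JAR(?, ?, 0)")) {
            // Reads the jar through an InputStream and stores it under a name
            // derived from a freshly created UUID (see JarUtil.add above).
            cs.setString(1, "file:/tmp/myRoutines.jar");
            cs.setString(2, "APP.MYROUTINES");
            cs.execute();
        }
    }
}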