Use of org.apache.derby.iapi.sql.dictionary.SchemaDescriptor in project derby by apache.
The class DataDictionaryImpl, method getTableDescriptor.
/**
 * Get the descriptor for the named table within the given schema.
 * If the schema parameter is null, the table is looked up in the
 * system schema. Table descriptors include object ids and
 * object types (table, view, etc.).
 *
 * @param tableName The name of the table to get the descriptor for
 * @param schema    The descriptor for the schema the table lives in.
 *                  If null, use the system schema.
 * @return The descriptor for the table, null if the table does not
 *         exist.
 *
 * @exception StandardException Thrown on failure
 */
public TableDescriptor getTableDescriptor(String tableName, SchemaDescriptor schema, TransactionController tc) throws StandardException {
    TableDescriptor retval = null;
    /*
    ** If we didn't get a schema descriptor, we had better
    ** have a system table.
    */
    if (SanityManager.DEBUG) {
        if ((schema == null) && !tableName.startsWith("SYS")) {
            SanityManager.THROWASSERT("null schema for non system table " + tableName);
        }
    }
    SchemaDescriptor sd = (schema == null) ? getSystemSchemaDescriptor() : schema;
    UUID schemaUUID = sd.getUUID();
    if (SchemaDescriptor.STD_SYSTEM_DIAG_SCHEMA_NAME.equals(sd.getSchemaName())) {
        TableDescriptor td = new TableDescriptor(this, tableName, sd, TableDescriptor.VTI_TYPE, TableDescriptor.DEFAULT_LOCK_GRANULARITY);
        // ensure a VTI class exists
        if (getVTIClass(td, false) != null)
            return td;
        // otherwise fall through to the standard search
    }
    TableKey tableKey = new TableKey(schemaUUID, tableName);
    /* Only use the cache if we're in compile-only mode */
    if (getCacheMode() == DataDictionary.COMPILE_ONLY_MODE) {
        NameTDCacheable cacheEntry = (NameTDCacheable) nameTdCache.find(tableKey);
        if (cacheEntry != null) {
            retval = cacheEntry.getTableDescriptor();
            // a bind in a previous command might have set referenced cols
            retval.setReferencedColumnMap(null);
            nameTdCache.release(cacheEntry);
        }
        return retval;
    }
    return getTableDescriptorIndex1Scan(tableName, schemaUUID.toString());
}
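The method resolves the schema first (falling back to the system schema when none is supplied) and then looks the table up by (schemaUUID, tableName). Below is a minimal caller sketch, mirroring the peekAtIdentity usage further down this page; dd (a DataDictionary), lcc, schemaName and tableName are assumed to be in scope and are not part of the Derby source shown here:

// Hypothetical caller: dd, lcc, schemaName and tableName are assumptions, not Derby code.
TransactionController tc = lcc.getTransactionExecute();
SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc, true);  // same arguments as in peekAtIdentity below
TableDescriptor td = dd.getTableDescriptor(tableName, sd, tc);       // returns null when the table does not exist
if (td == null) {
    throw StandardException.newException(SQLState.LANG_OBJECT_NOT_FOUND_DURING_EXECUTION, "TABLE", schemaName + "." + tableName);
}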
Use of org.apache.derby.iapi.sql.dictionary.SchemaDescriptor in project derby by apache.
The class DataDictionaryImpl, method createDictionaryTables.
/*
** Methods related to create
*/
/**
 * Create all the required dictionary tables. Any classes that extend this class
 * and need to create new tables should override this method, and then
 * call this method as the first action in the new method, e.g.
 * <PRE>
 * protected void createDictionaryTables(Properties params, TransactionController tc,
 *                                       DataDescriptorGenerator ddg)
 *     throws StandardException
 * {
 *     super.createDictionaryTables(params, tc, ddg);
 *
 *     ...
 * }
 * </PRE>
 *
 * @exception StandardException Standard Derby error policy
 */
protected void createDictionaryTables(Properties params, TransactionController tc, DataDescriptorGenerator ddg) throws StandardException {
    /*
    ** Create a new schema descriptor -- with no args
    ** creates the system schema descriptor in which
    ** all tables reside (SYS)
    */
    systemSchemaDesc = newSystemSchemaDesc(SchemaDescriptor.STD_SYSTEM_SCHEMA_NAME, SchemaDescriptor.SYSTEM_SCHEMA_UUID);
    /* Create the core tables and generate the UUIDs for their
     * heaps (before creating the indexes).
     * RESOLVE - This loop will eventually drive all of the
     * work for creating the core tables.
     */
    for (int coreCtr = 0; coreCtr < NUM_CORE; coreCtr++) {
        TabInfoImpl ti = coreInfo[coreCtr];
        Properties heapProperties = ti.getCreateHeapProperties();
        ti.setHeapConglomerate(createConglomerate(ti.getTableName(), tc, ti.getCatalogRowFactory().makeEmptyRow(), heapProperties));
        // bootstrap indexes on core tables before bootstrapping the tables themselves
        if (coreInfo[coreCtr].getNumberOfIndexes() > 0) {
            bootStrapSystemIndexes(systemSchemaDesc, tc, ddg, ti);
        }
    }
    // bootstrap the core tables into the data dictionary
    for (int ictr = 0; ictr < NUM_CORE; ictr++) {
        /* RESOLVE - need to do something with COLUMNTYPE in the following table-creating code */
        TabInfoImpl ti = coreInfo[ictr];
        addSystemTableToDictionary(ti, systemSchemaDesc, tc, ddg);
    }
    // Add the bootstrap information to the configuration
    params.put(CFG_SYSTABLES_ID, Long.toString(coreInfo[SYSTABLES_CORE_NUM].getHeapConglomerate()));
    params.put(CFG_SYSTABLES_INDEX1_ID, Long.toString(coreInfo[SYSTABLES_CORE_NUM].getIndexConglomerate(((SYSTABLESRowFactory) coreInfo[SYSTABLES_CORE_NUM].getCatalogRowFactory()).SYSTABLES_INDEX1_ID)));
    params.put(CFG_SYSTABLES_INDEX2_ID, Long.toString(coreInfo[SYSTABLES_CORE_NUM].getIndexConglomerate(((SYSTABLESRowFactory) coreInfo[SYSTABLES_CORE_NUM].getCatalogRowFactory()).SYSTABLES_INDEX2_ID)));
    params.put(CFG_SYSCOLUMNS_ID, Long.toString(coreInfo[SYSCOLUMNS_CORE_NUM].getHeapConglomerate()));
    params.put(CFG_SYSCOLUMNS_INDEX1_ID, Long.toString(coreInfo[SYSCOLUMNS_CORE_NUM].getIndexConglomerate(((SYSCOLUMNSRowFactory) coreInfo[SYSCOLUMNS_CORE_NUM].getCatalogRowFactory()).SYSCOLUMNS_INDEX1_ID)));
    params.put(CFG_SYSCOLUMNS_INDEX2_ID, Long.toString(coreInfo[SYSCOLUMNS_CORE_NUM].getIndexConglomerate(((SYSCOLUMNSRowFactory) coreInfo[SYSCOLUMNS_CORE_NUM].getCatalogRowFactory()).SYSCOLUMNS_INDEX2_ID)));
    params.put(CFG_SYSCONGLOMERATES_ID, Long.toString(coreInfo[SYSCONGLOMERATES_CORE_NUM].getHeapConglomerate()));
    params.put(CFG_SYSCONGLOMERATES_INDEX1_ID, Long.toString(coreInfo[SYSCONGLOMERATES_CORE_NUM].getIndexConglomerate(((SYSCONGLOMERATESRowFactory) coreInfo[SYSCONGLOMERATES_CORE_NUM].getCatalogRowFactory()).SYSCONGLOMERATES_INDEX1_ID)));
    params.put(CFG_SYSCONGLOMERATES_INDEX2_ID, Long.toString(coreInfo[SYSCONGLOMERATES_CORE_NUM].getIndexConglomerate(((SYSCONGLOMERATESRowFactory) coreInfo[SYSCONGLOMERATES_CORE_NUM].getCatalogRowFactory()).SYSCONGLOMERATES_INDEX2_ID)));
    params.put(CFG_SYSCONGLOMERATES_INDEX3_ID, Long.toString(coreInfo[SYSCONGLOMERATES_CORE_NUM].getIndexConglomerate(((SYSCONGLOMERATESRowFactory) coreInfo[SYSCONGLOMERATES_CORE_NUM].getCatalogRowFactory()).SYSCONGLOMERATES_INDEX3_ID)));
    params.put(CFG_SYSSCHEMAS_ID, Long.toString(coreInfo[SYSSCHEMAS_CORE_NUM].getHeapConglomerate()));
    params.put(CFG_SYSSCHEMAS_INDEX1_ID, Long.toString(coreInfo[SYSSCHEMAS_CORE_NUM].getIndexConglomerate(((SYSSCHEMASRowFactory) coreInfo[SYSSCHEMAS_CORE_NUM].getCatalogRowFactory()).SYSSCHEMAS_INDEX1_ID)));
    params.put(CFG_SYSSCHEMAS_INDEX2_ID, Long.toString(coreInfo[SYSSCHEMAS_CORE_NUM].getIndexConglomerate(((SYSSCHEMASRowFactory) coreInfo[SYSSCHEMAS_CORE_NUM].getCatalogRowFactory()).SYSSCHEMAS_INDEX2_ID)));
    // Add the SYSIBM schema
    sysIBMSchemaDesc = addSystemSchema(SchemaDescriptor.IBM_SYSTEM_SCHEMA_NAME, SchemaDescriptor.SYSIBM_SCHEMA_UUID, tc);
    /* Create the non-core tables and generate the UUIDs for their
     * heaps (before creating the indexes).
     * RESOLVE - This loop will eventually drive all of the
     * work for creating the non-core tables.
     */
    for (int noncoreCtr = 0; noncoreCtr < NUM_NONCORE; noncoreCtr++) {
        int catalogNumber = noncoreCtr + NUM_CORE;
        boolean isDummy = (catalogNumber == SYSDUMMY1_CATALOG_NUM);
        TabInfoImpl ti = getNonCoreTIByNumber(catalogNumber);
        makeCatalog(ti, isDummy ? sysIBMSchemaDesc : systemSchemaDesc, tc);
        if (isDummy)
            populateSYSDUMMY1(tc);
        // Clear the table entry for this non-core table,
        // to allow it to be garbage-collected. The idea
        // is that a running database might never need to
        // reference a non-core table after it was created.
        clearNoncoreTable(noncoreCtr);
    }
    // Add the system schema
    addDescriptor(systemSchemaDesc, null, SYSSCHEMAS_CATALOG_NUM, false, tc);
    // Add the following system schemas to be compatible with DB2.
    // Derby does not currently use them, but creating them as
    // system schemas ensures applications can't create them,
    // drop them, or create objects in them. This set includes:
    // SYSCAT
    // SYSFUN
    // SYSPROC
    // SYSSTAT
    // NULLID
    // Add the SYSCAT schema
    addSystemSchema(SchemaDescriptor.IBM_SYSTEM_CAT_SCHEMA_NAME, SchemaDescriptor.SYSCAT_SCHEMA_UUID, tc);
    // Add the SYSFUN schema
    addSystemSchema(SchemaDescriptor.IBM_SYSTEM_FUN_SCHEMA_NAME, SchemaDescriptor.SYSFUN_SCHEMA_UUID, tc);
    // Add the SYSPROC schema
    addSystemSchema(SchemaDescriptor.IBM_SYSTEM_PROC_SCHEMA_NAME, SchemaDescriptor.SYSPROC_SCHEMA_UUID, tc);
    // Add the SYSSTAT schema
    addSystemSchema(SchemaDescriptor.IBM_SYSTEM_STAT_SCHEMA_NAME, SchemaDescriptor.SYSSTAT_SCHEMA_UUID, tc);
    // Add the NULLID schema
    addSystemSchema(SchemaDescriptor.IBM_SYSTEM_NULLID_SCHEMA_NAME, SchemaDescriptor.NULLID_SCHEMA_UUID, tc);
    // Add the SQLJ schema
    addSystemSchema(SchemaDescriptor.STD_SQLJ_SCHEMA_NAME, SchemaDescriptor.SQLJ_SCHEMA_UUID, tc);
    // Add the SYSCS_DIAG schema
    addSystemSchema(SchemaDescriptor.STD_SYSTEM_DIAG_SCHEMA_NAME, SchemaDescriptor.SYSCS_DIAG_SCHEMA_UUID, tc);
    // Add the SYSCS_UTIL schema
    addSystemSchema(SchemaDescriptor.STD_SYSTEM_UTIL_SCHEMA_NAME, SchemaDescriptor.SYSCS_UTIL_SCHEMA_UUID, tc);
    // Add the APP schema
    SchemaDescriptor appSchemaDesc = new SchemaDescriptor(this, SchemaDescriptor.STD_DEFAULT_SCHEMA_NAME, SchemaDescriptor.DEFAULT_USER_NAME, uuidFactory.recreateUUID(SchemaDescriptor.DEFAULT_SCHEMA_UUID), false);
    addDescriptor(appSchemaDesc, null, SYSSCHEMAS_CATALOG_NUM, false, tc);
}
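A subclass that needs extra catalogs would follow the override pattern described in the javadoc above: call super first, then create its own tables. The class and helper names below are hypothetical and only illustrate that contract:

// Hypothetical subclass; MyDataDictionaryImpl and createMyExtraCatalog are illustrative names, not Derby code.
public class MyDataDictionaryImpl extends DataDictionaryImpl {
    @Override
    protected void createDictionaryTables(Properties params, TransactionController tc, DataDescriptorGenerator ddg)
            throws StandardException {
        // Let the base class build the standard SYS/SYSIBM catalogs and schemas first.
        super.createDictionaryTables(params, tc, ddg);
        // ... then create any additional dictionary tables this dictionary needs.
        createMyExtraCatalog(tc, ddg);
    }

    private void createMyExtraCatalog(TransactionController tc, DataDescriptorGenerator ddg)
            throws StandardException {
        // Placeholder for subclass-specific work; details omitted.
    }
}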
Use of org.apache.derby.iapi.sql.dictionary.SchemaDescriptor in project derby by apache.
The class DataDictionaryImpl, method peekAtIdentity.
public Long peekAtIdentity(String schemaName, String tableName) throws StandardException {
    LanguageConnectionContext lcc = getLCC();
    TransactionController tc = lcc.getTransactionExecute();
    SchemaDescriptor sd = getSchemaDescriptor(schemaName, tc, true);
    TableDescriptor td = getTableDescriptor(tableName, sd, tc);
    if (td == null) {
        throw StandardException.newException(SQLState.LANG_OBJECT_NOT_FOUND_DURING_EXECUTION, "TABLE", (schemaName + "." + tableName));
    }
    return peekAtSequence(SchemaDescriptor.STD_SYSTEM_SCHEMA_NAME, TableDescriptor.makeSequenceName(td.getUUID()));
}
Use of org.apache.derby.iapi.sql.dictionary.SchemaDescriptor in project derby by apache.
The class SYSCONGLOMERATESRowFactory, method makeRow.
/**
 * Make a SYSCONGLOMERATES row.
 *
 * @param td     The ConglomerateDescriptor to build the row from (may be null)
 * @param parent The SchemaDescriptor for the conglomerate's schema, or null to take the schema id from td
 *
 * @return Row suitable for inserting into SYSCONGLOMERATES.
 *
 * @exception StandardException thrown on failure
 */
public ExecRow makeRow(TupleDescriptor td, TupleDescriptor parent) throws StandardException {
    ExecRow row;
    DataValueDescriptor col;
    String tabID = null;
    Long conglomNumber = null;
    String conglomName = null;
    Boolean supportsIndex = null;
    IndexRowGenerator indexRowGenerator = null;
    Boolean supportsConstraint = null;
    String conglomUUIDString = null;
    String schemaID = null;
    ConglomerateDescriptor conglomerate = (ConglomerateDescriptor) td;
    if (td != null) {
        /* Sometimes the SchemaDescriptor is non-null and sometimes it
         * is null. (We can't just rely on getting the schema id from
         * the ConglomerateDescriptor because it can be null when
         * we are creating a new conglomerate.)
         */
        if (parent != null) {
            SchemaDescriptor sd = (SchemaDescriptor) parent;
            schemaID = sd.getUUID().toString();
        } else {
            schemaID = conglomerate.getSchemaID().toString();
        }
        tabID = conglomerate.getTableID().toString();
        conglomNumber = conglomerate.getConglomerateNumber();
        conglomName = conglomerate.getConglomerateName();
        conglomUUIDString = conglomerate.getUUID().toString();
        supportsIndex = conglomerate.isIndex();
        indexRowGenerator = conglomerate.getIndexDescriptor();
        supportsConstraint = conglomerate.isConstraint();
    }
    /* RESOLVE - It would be nice to require less knowledge about sysconglomerates
     * and have this be more table driven.
     */
    /* Build the row to insert */
    row = getExecutionFactory().getValueRow(SYSCONGLOMERATES_COLUMN_COUNT);
    /* 1st column is SCHEMAID (UUID - char(36)) */
    row.setColumn(1, new SQLChar(schemaID));
    /* 2nd column is TABLEID (UUID - char(36)) */
    row.setColumn(2, new SQLChar(tabID));
    /* 3rd column is CONGLOMERATENUMBER (long) */
    row.setColumn(3, new SQLLongint(conglomNumber));
    /* 4th column is CONGLOMERATENAME (varchar(128))
    ** If null, use the tableid so we always
    ** have a unique column
    */
    row.setColumn(4, (conglomName == null) ? new SQLVarchar(tabID) : new SQLVarchar(conglomName));
    /* 5th column is ISINDEX (boolean) */
    row.setColumn(5, new SQLBoolean(supportsIndex));
    /* 6th column is DESCRIPTOR
     * (user type org.apache.derby.catalog.IndexDescriptor)
     */
    row.setColumn(6, new UserType((indexRowGenerator == null ? (IndexDescriptor) null : indexRowGenerator.getIndexDescriptor())));
    /* 7th column is ISCONSTRAINT (boolean) */
    row.setColumn(7, new SQLBoolean(supportsConstraint));
    /* 8th column is CONGLOMERATEID (UUID - char(36)) */
    row.setColumn(8, new SQLChar(conglomUUIDString));
    return row;
}
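The numbered column comments correspond one-for-one to the SYS.SYSCONGLOMERATES catalog that is visible through SQL. Below is a small JDBC sketch that reads the same columns back; the connection URL is a placeholder assumption and error handling is omitted:

// Hypothetical JDBC client; "jdbc:derby:myDB" is a placeholder URL. Requires java.sql.* imports.
try (Connection conn = DriverManager.getConnection("jdbc:derby:myDB");
     Statement st = conn.createStatement();
     ResultSet rs = st.executeQuery(
         "SELECT SCHEMAID, TABLEID, CONGLOMERATENUMBER, CONGLOMERATENAME, " +
         "ISINDEX, ISCONSTRAINT, CONGLOMERATEID FROM SYS.SYSCONGLOMERATES")) {
    while (rs.next()) {
        // Columns 1-5, 7 and 8 from makeRow above; DESCRIPTOR (column 6) is a user type and is skipped here.
        System.out.println(rs.getString("CONGLOMERATENAME") + " isIndex=" + rs.getBoolean("ISINDEX"));
    }
}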
Use of org.apache.derby.iapi.sql.dictionary.SchemaDescriptor in project derby by apache.
The class GenericPreparedStatement, method makeInvalid.
/**
 * Mark the dependent as invalid (due to at least one of
 * its dependencies being invalid).
 *
 * @param action The action causing the invalidation
 * @param lcc    The LanguageConnectionContext to use
 *
 * @exception StandardException Standard Derby error policy.
 */
public void makeInvalid(int action, LanguageConnectionContext lcc) throws StandardException {
    boolean alreadyInvalid;
    switch(action) {
        case DependencyManager.RECHECK_PRIVILEGES:
            return;
    }
    synchronized (this) {
        if (compilingStatement) {
            // Since the statement is in the process of being compiled,
            // and at the end of the compilation it will set isValid to
            // true and overwrite whatever we set it to here, set another
            // flag to indicate that an invalidation was requested. A
            // re-compilation will be triggered if this flag is set, but
            // not until the current compilation is done.
            invalidatedWhileCompiling = true;
            return;
        }
        alreadyInvalid = !isValid;
        // make ourselves invalid
        isValid = false;
        // block compiles while we are invalidating
        beginCompiling();
    }
    try {
        DependencyManager dm = lcc.getDataDictionary().getDependencyManager();
        /* Clear out the old dependencies on this statement as we
         * will build the new set during the reprepare in makeValid().
         */
        dm.clearDependencies(lcc, this);
        /*
        ** If we are invalidating an EXECUTE STATEMENT because of a stale
        ** plan, we also need to invalidate the stored prepared statement.
        */
        if (execStmtName != null) {
            switch(action) {
                case DependencyManager.INTERNAL_RECOMPILE_REQUEST:
                case DependencyManager.CHANGED_CURSOR:
                {
                    /*
                    ** Get the DataDictionary, so we can get the descriptor for
                    ** the SPS to invalidate it.
                    */
                    DataDictionary dd = lcc.getDataDictionary();
                    SchemaDescriptor sd = dd.getSchemaDescriptor(execSchemaName, lcc.getTransactionCompile(), true);
                    SPSDescriptor spsd = dd.getSPSDescriptor(execStmtName, sd);
                    spsd.makeInvalid(action, lcc);
                    break;
                }
            }
        }
    } finally {
        endCompiling();
    }
}
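The interesting part is the hand-off between compilingStatement and invalidatedWhileCompiling: an invalidation that arrives while a compile is in progress is recorded and honored once the compile finishes, rather than being overwritten. Below is a stripped-down sketch of that pattern; the class and field names are illustrative, not Derby's:

// Minimal illustration of the defer-invalidation-while-compiling idea described in the comments above.
class RecompilableStatement {
    private boolean valid;
    private boolean compiling;
    private boolean invalidatedWhileCompiling;

    synchronized void makeInvalid() {
        if (compiling) {
            // The compile in progress will set valid = true when it finishes,
            // so record the request instead of flipping the flag now.
            invalidatedWhileCompiling = true;
            return;
        }
        valid = false;
    }

    synchronized void endCompiling() {
        compiling = false;
        if (invalidatedWhileCompiling) {
            // Honor the deferred request now that the compile is done.
            invalidatedWhileCompiling = false;
            valid = false;
        }
    }
}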