Use of org.apache.derby.iapi.sql.dictionary.CatalogRowFactory in project derby by apache.
In the class DataDictionaryImpl, method addSystemTableToDictionary.
/**
* Add the required entries to the data dictionary for a System table.
*/
private void addSystemTableToDictionary(TabInfoImpl ti, SchemaDescriptor sd, TransactionController tc, DataDescriptorGenerator ddg) throws StandardException {
    CatalogRowFactory crf = ti.getCatalogRowFactory();
    String name = ti.getTableName();
    long conglomId = ti.getHeapConglomerate();
    SystemColumn[] columnList = crf.buildColumnList();
    UUID heapUUID = crf.getCanonicalHeapUUID();
    String heapName = crf.getCanonicalHeapName();
    TableDescriptor td;
    UUID toid;
    int columnCount;
    SystemColumn column;
    // add table to the data dictionary
    columnCount = columnList.length;
    td = ddg.newTableDescriptor(name, sd, TableDescriptor.SYSTEM_TABLE_TYPE, TableDescriptor.ROW_LOCK_GRANULARITY);
    td.setUUID(crf.getCanonicalTableUUID());
    addDescriptor(td, sd, SYSTABLES_CATALOG_NUM, false, tc);
    toid = td.getUUID();
    /* Add the conglomerate for the heap */
    ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, heapName, false, null, false, heapUUID, toid, sd.getUUID());
    addDescriptor(cgd, sd, SYSCONGLOMERATES_CATALOG_NUM, false, tc);
    /* Create the columns */
    ColumnDescriptor[] cdlArray = new ColumnDescriptor[columnCount];
    for (int columnNumber = 0; columnNumber < columnCount; columnNumber++) {
        column = columnList[columnNumber];
        if (SanityManager.DEBUG) {
            if (column == null) {
                SanityManager.THROWASSERT("column " + columnNumber + " for table " + ti.getTableName() + " is null");
            }
        }
        cdlArray[columnNumber] = makeColumnDescriptor(column, columnNumber + 1, td);
    }
    addDescriptorArray(cdlArray, td, SYSCOLUMNS_CATALOG_NUM, false, tc);
    // now add the columns to the cdl of the table.
    ColumnDescriptorList cdl = td.getColumnDescriptorList();
    for (int i = 0; i < columnCount; i++) {
        cdl.add(cdlArray[i]);
    }
}
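The method above reads everything it needs about the catalog's fixed shape from the CatalogRowFactory before it creates any descriptors. Below is a minimal sketch of that read-only part, pulled out of DataDictionaryImpl; the class name CatalogShapeSketch and the returned summary string are illustrative only, while the CatalogRowFactory calls are the same ones used above.

import org.apache.derby.catalog.UUID;
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.dictionary.CatalogRowFactory;
import org.apache.derby.iapi.sql.dictionary.SystemColumn;

final class CatalogShapeSketch {
    static String describe(CatalogRowFactory crf) throws StandardException {
        SystemColumn[] columns = crf.buildColumnList();   // column layout of the catalog
        UUID tableUUID = crf.getCanonicalTableUUID();     // hard-wired UUID of the catalog table
        UUID heapUUID = crf.getCanonicalHeapUUID();       // hard-wired UUID of its heap conglomerate
        String heapName = crf.getCanonicalHeapName();     // canonical name of the heap conglomerate
        return columns.length + " columns, table UUID " + tableUUID
                + ", heap " + heapName + " (UUID " + heapUUID + ")";
    }
}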
Use of org.apache.derby.iapi.sql.dictionary.CatalogRowFactory in project derby by apache.
In the class DataDictionaryImpl, method bootstrapOneIndex.
private ConglomerateDescriptor bootstrapOneIndex(SchemaDescriptor sd, TransactionController tc, DataDescriptorGenerator ddg, TabInfoImpl ti, int indexNumber, long heapConglomerateNumber) throws StandardException {
    boolean isUnique;
    ConglomerateController cc;
    ExecRow baseRow;
    ExecIndexRow indexableRow;
    int numColumns;
    long conglomId;
    RowLocation rl;
    CatalogRowFactory rf = ti.getCatalogRowFactory();
    IndexRowGenerator irg;
    ConglomerateDescriptor conglomerateDescriptor;
    initSystemIndexVariables(ti, indexNumber);
    irg = ti.getIndexRowGenerator(indexNumber);
    numColumns = ti.getIndexColumnCount(indexNumber);
    /* Is the index unique */
    isUnique = ti.isIndexUnique(indexNumber);
    // create an index row template
    indexableRow = irg.getIndexRowTemplate();
    baseRow = rf.makeEmptyRowForCurrentVersion();
    // Get a RowLocation template
    cc = tc.openConglomerate(heapConglomerateNumber, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_REPEATABLE_READ);
    rl = cc.newRowLocationTemplate();
    cc.close();
    // Get an index row based on the base row
    irg.getIndexRow(baseRow, rl, indexableRow, (FormatableBitSet) null);
    // Describe the properties of the index to the store using Properties
    // RESOLVE: The following properties assume a BTREE index.
    Properties indexProperties = ti.getCreateIndexProperties(indexNumber);
    // Tell it the conglomerate id of the base table
    indexProperties.put("baseConglomerateId", Long.toString(heapConglomerateNumber));
    // All indexes are unique because they contain the RowLocation.
    // The number of uniqueness columns must include the RowLocation
    // if the user did not specify a unique index.
    indexProperties.put("nUniqueColumns", Integer.toString(isUnique ? numColumns : numColumns + 1));
    // By convention, the row location column is the last column
    indexProperties.put("rowLocationColumn", Integer.toString(numColumns));
    // For now, all columns are key fields, including the RowLocation
    indexProperties.put("nKeyFields", Integer.toString(numColumns + 1));
    /* Create and add the conglomerate (index) */
    conglomId = tc.createConglomerate(
            "BTREE",                              // we're requesting an index conglomerate
            indexableRow.getRowArray(),
            null,                                 // default sort order
            null,                                 // default collation ids for columns in all system congloms
            indexProperties,                      // default properties
            TransactionController.IS_DEFAULT);    // not temporary
    conglomerateDescriptor = ddg.newConglomerateDescriptor(conglomId, rf.getIndexName(indexNumber), true, irg, false, rf.getCanonicalIndexUUID(indexNumber), rf.getCanonicalTableUUID(), sd.getUUID());
    ti.setIndexConglomerate(conglomerateDescriptor);
    return conglomerateDescriptor;
}
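The store-level properties passed to createConglomerate are what make the index layout work: every system index carries the base row's RowLocation as its last column, so even a "non-unique" index becomes unique once that column is counted. Below is a minimal sketch of how those properties could be assembled; the helper class is hypothetical, but the property keys and values mirror the ones set above.

import java.util.Properties;

final class IndexPropsSketch {
    static Properties btreeProperties(long heapConglomerateNumber, int numColumns, boolean isUnique) {
        Properties p = new Properties();
        // conglomerate id of the base table the index points into
        p.put("baseConglomerateId", Long.toString(heapConglomerateNumber));
        // a non-unique index becomes unique once the trailing RowLocation column is counted
        p.put("nUniqueColumns", Integer.toString(isUnique ? numColumns : numColumns + 1));
        // by convention, the RowLocation is the last column (position numColumns, 0-based)
        p.put("rowLocationColumn", Integer.toString(numColumns));
        // every column, including the RowLocation, is part of the key
        p.put("nKeyFields", Integer.toString(numColumns + 1));
        return p;
    }
}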
Use of org.apache.derby.iapi.sql.dictionary.CatalogRowFactory in project derby by apache.
In the class DataDictionaryImpl, method addDescriptorArray.
/**
* array version of addDescriptor.
* @see DataDictionary#addDescriptor
*/
public void addDescriptorArray(TupleDescriptor[] td, TupleDescriptor parent, int catalogNumber, boolean allowDuplicates, TransactionController tc) throws StandardException {
    TabInfoImpl ti = (catalogNumber < NUM_CORE) ? coreInfo[catalogNumber] : getNonCoreTI(catalogNumber);
    CatalogRowFactory crf = ti.getCatalogRowFactory();
    ExecRow[] rl = new ExecRow[td.length];
    for (int index = 0; index < td.length; index++) {
        ExecRow row = crf.makeRow(td[index], parent);
        rl[index] = row;
    }
    int insertRetCode = ti.insertRowList(rl, tc);
    if (!allowDuplicates && insertRetCode != TabInfoImpl.ROWNOTDUPLICATE) {
        throw duplicateDescriptorException(td[insertRetCode], parent);
    }
}
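The per-descriptor work here is a single CatalogRowFactory.makeRow call; the batch insert and the duplicate check are handled by TabInfoImpl. Below is a minimal sketch of just the row-building half, factored into a standalone helper; the class and method names are illustrative, and makeRow is the same call used above.

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.dictionary.CatalogRowFactory;
import org.apache.derby.iapi.sql.dictionary.TupleDescriptor;
import org.apache.derby.iapi.sql.execute.ExecRow;

final class RowBatchSketch {
    static ExecRow[] buildRows(CatalogRowFactory crf, TupleDescriptor[] descriptors, TupleDescriptor parent) throws StandardException {
        ExecRow[] rows = new ExecRow[descriptors.length];
        for (int i = 0; i < descriptors.length; i++) {
            // one catalog row per descriptor; the parent supplies context such as the owning table
            rows[i] = crf.makeRow(descriptors[i], parent);
        }
        return rows;
    }
}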
Use of org.apache.derby.iapi.sql.dictionary.CatalogRowFactory in project derby by apache.
In the class DataDictionaryImpl, method getDescriptorViaHeap.
/**
* Return a single catalog row descriptor, or a list of them, from a
* system table where the access is a heap scan.
*
* @param columns which columns to fetch from the system
* table, or null to fetch all columns
* @param scanQualifiers qualifiers
* @param ti The TabInfoImpl to use
* @param parentTupleDescriptor The parentDescriptor, if applicable.
* @param list The list to build, if supplied.
* If null, then caller expects a single descriptor
* @param returnType The type of descriptor to look for
*
* @return The last matching descriptor
*
* @exception StandardException Thrown on error
*/
protected <T extends TupleDescriptor> T getDescriptorViaHeap(FormatableBitSet columns, ScanQualifier[][] scanQualifiers, TabInfoImpl ti, TupleDescriptor parentTupleDescriptor, List<? super T> list, Class<T> returnType) throws StandardException {
    CatalogRowFactory rf = ti.getCatalogRowFactory();
    ExecRow outRow;
    ScanController scanController;
    TransactionController tc;
    T td = null;
    // Get the current transaction controller
    tc = getTransactionCompile();
    outRow = rf.makeEmptyRow();
    /*
    ** Table scan
    */
    scanController = tc.openScan(
            ti.getHeapConglomerate(),           // conglomerate to open
            false,                              // don't hold open across commit
            0,                                  // for read
            TransactionController.MODE_TABLE,
            TransactionController.ISOLATION_REPEATABLE_READ,
            columns,
            (DataValueDescriptor[]) null,       // start position - first row
            0,                                  // startSearchOperation - none
            scanQualifiers,                     // scanQualifier
            (DataValueDescriptor[]) null,       // stop position - through last row
            0);                                 // stopSearchOperation - none
    while (scanController.fetchNext(outRow.getRowArray())) {
        td = returnType.cast(rf.buildDescriptor(outRow, parentTupleDescriptor, this));
        /* If list is null, the caller only wants a single descriptor, so we're done;
         * otherwise add the current descriptor to the list.
         */
        if (list == null) {
            break;
        } else {
            list.add(td);
        }
    }
    scanController.close();
    return td;
}
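The heap path boils down to three CatalogRowFactory-centred steps: make an empty template row, fetch each heap row into it, and let buildDescriptor turn the raw row into a typed descriptor. Below is a minimal sketch of that loop in isolation, assuming an already-open ScanController; the helper name is hypothetical, and dd stands in for the owning DataDictionary that the method above passes as this.

import java.util.List;
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.dictionary.CatalogRowFactory;
import org.apache.derby.iapi.sql.dictionary.DataDictionary;
import org.apache.derby.iapi.sql.dictionary.TupleDescriptor;
import org.apache.derby.iapi.sql.execute.ExecRow;
import org.apache.derby.iapi.store.access.ScanController;

final class HeapScanSketch {
    static void drain(ScanController scan, CatalogRowFactory rf, TupleDescriptor parent, DataDictionary dd, List<TupleDescriptor> out) throws StandardException {
        ExecRow row = rf.makeEmptyRow();                    // template matching the catalog's column layout
        while (scan.fetchNext(row.getRowArray())) {         // copy the next heap row into the template
            out.add(rf.buildDescriptor(row, parent, dd));   // turn the raw row into a TupleDescriptor
        }
        scan.close();
    }
}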
Use of org.apache.derby.iapi.sql.dictionary.CatalogRowFactory in project derby by apache.
In the class DataDictionaryImpl, method getDescriptorViaIndexMinion.
private <T extends TupleDescriptor> T getDescriptorViaIndexMinion(int indexId, ExecIndexRow keyRow, ScanQualifier[][] scanQualifiers, TabInfoImpl ti, TupleDescriptor parentTupleDescriptor, List<? super T> list, Class<T> returnType, boolean forUpdate, int isolationLevel, TransactionController tc) throws StandardException {
    CatalogRowFactory rf = ti.getCatalogRowFactory();
    ConglomerateController heapCC;
    ExecIndexRow indexRow1;
    ExecRow outRow;
    RowLocation baseRowLocation;
    ScanController scanController;
    T td = null;
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(isolationLevel == TransactionController.ISOLATION_REPEATABLE_READ || isolationLevel == TransactionController.ISOLATION_READ_UNCOMMITTED);
    }
    outRow = rf.makeEmptyRow();
    heapCC = tc.openConglomerate(ti.getHeapConglomerate(), false, 0, TransactionController.MODE_RECORD, isolationLevel);
    /* Scan the index and go to the data pages for qualifying rows to
     * build the column descriptor.
     */
    scanController = tc.openScan(
            ti.getIndexConglomerate(indexId),   // conglomerate to open
            false,                              // don't hold open across commit
            (forUpdate) ? TransactionController.OPENMODE_FORUPDATE : 0,
            TransactionController.MODE_RECORD,
            isolationLevel,
            (FormatableBitSet) null,            // all fields as objects
            keyRow.getRowArray(),               // start position - first row
            ScanController.GE,                  // startSearchOperation
            scanQualifiers,                     // scanQualifier
            keyRow.getRowArray(),               // stop position - through last row
            ScanController.GT);                 // stopSearchOperation
    while (true) {
        // create an index row template
        indexRow1 = getIndexRowFromHeapRow(ti.getIndexRowGenerator(indexId), heapCC.newRowLocationTemplate(), outRow);
        // fetch the next index row; stop when the scan is exhausted
        if (!scanController.fetchNext(indexRow1.getRowArray())) {
            break;
        }
        baseRowLocation = (RowLocation) indexRow1.getColumn(indexRow1.nColumns());
        // RESOLVE paulat - remove the try catch block when track 3677 is fixed
        // just leave the contents of the try block
        // adding to get more info on track 3677
        boolean base_row_exists = false;
        try {
            base_row_exists = heapCC.fetch(baseRowLocation, outRow.getRowArray(), (FormatableBitSet) null);
        } catch (RuntimeException re) {
            if (SanityManager.DEBUG) {
                if (re instanceof AssertFailure) {
                    StringBuffer strbuf = new StringBuffer("Error retrieving base row in table " + ti.getTableName());
                    strbuf.append(": An ASSERT was thrown when trying to locate a row matching index row " + indexRow1 + " from index " + ti.getIndexName(indexId) + ", conglom number " + ti.getIndexConglomerate(indexId));
                    debugGenerateInfo(strbuf, tc, heapCC, ti, indexId);
                }
            }
            throw re;
        } catch (StandardException se) {
            if (SanityManager.DEBUG) {
                // do not want to catch lock timeout errors here
                if (se.getSQLState().equals("XSRS9")) {
                    StringBuffer strbuf = new StringBuffer("Error retrieving base row in table " + ti.getTableName());
                    strbuf.append(": A StandardException was thrown when trying to locate a row matching index row " + indexRow1 + " from index " + ti.getIndexName(indexId) + ", conglom number " + ti.getIndexConglomerate(indexId));
                    debugGenerateInfo(strbuf, tc, heapCC, ti, indexId);
                }
            }
            throw se;
        }
        if (SanityManager.DEBUG) {
            // The base row should exist here, since we are holding a scan cursor
            // on the index at ISOLATION_REPEATABLE_READ.
            if (!base_row_exists && (isolationLevel == TransactionController.ISOLATION_REPEATABLE_READ)) {
                StringBuffer strbuf = new StringBuffer("Error retrieving base row in table " + ti.getTableName());
                strbuf.append(": could not locate a row matching index row " + indexRow1 + " from index " + ti.getIndexName(indexId) + ", conglom number " + ti.getIndexConglomerate(indexId));
                debugGenerateInfo(strbuf, tc, heapCC, ti, indexId);
                // RESOLVE: for now, we are going to kill the VM
                // to help debug this problem.
                System.exit(1);
                // RESOLVE: not currently reached
                // SanityManager.THROWASSERT(strbuf.toString());
            }
        }
        if (!base_row_exists && (isolationLevel == TransactionController.ISOLATION_READ_UNCOMMITTED)) {
            // If isolationLevel == ISOLATION_READ_UNCOMMITTED, we may see that
            // the base row does not exist even though the index row did. This
            // mode is currently only used by TableNameInfo's call to
            // hashAllTableDescriptorsByTableId, cf. DERBY-3678, and by
            // getStatisticsDescriptors, cf. DERBY-4881.
            //
            // For the former call, an attempt is made to read a table's schema
            // descriptor, and if the base row for the schema has disappeared
            // between reading the index and the base table, the table that
            // needs this information is gone, too. So the table should not be
            // needed for printing lock timeout or deadlock information, and we
            // can safely return an empty (schema) descriptor. Furthermore,
            // neither Timeout nor DeadLock diagnostics access the schema of a
            // table descriptor, so it seems safe to just return an empty
            // schema descriptor for the table.
            //
            // There is a theoretical chance another row may have taken the
            // first one's place, but only if a compress of the base table
            // managed to run between the time we read the index and the base
            // row, which seems unlikely, so we ignore that.
            //
            // Even the index row may be gone in the above use case, of course,
            // and that case also returns an empty descriptor since no match is
            // found.
            td = null;
        } else {
            // normal case
            td = returnType.cast(rf.buildDescriptor(outRow, parentTupleDescriptor, this));
        }
        /* If list is null, the caller only wants a single descriptor, so we're done;
         * otherwise add the current descriptor to the list.
         */
        if (list == null) {
            break;
        } else if (td != null) {
            list.add(td);
        }
    }
    scanController.close();
    heapCC.close();
    return td;
}
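The core of the loop above is the hop from an index row to its base row: the RowLocation stored in the last index column is handed to the heap's ConglomerateController, and a missing base row is tolerated only under READ_UNCOMMITTED. Below is a minimal sketch of just that hop; the helper class is hypothetical, and the fetch call mirrors the one in the try block above.

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.services.io.FormatableBitSet;
import org.apache.derby.iapi.sql.execute.ExecIndexRow;
import org.apache.derby.iapi.sql.execute.ExecRow;
import org.apache.derby.iapi.store.access.ConglomerateController;
import org.apache.derby.iapi.types.RowLocation;

final class IndexHopSketch {
    static boolean fetchBaseRow(ConglomerateController heapCC, ExecIndexRow indexRow, ExecRow baseRow) throws StandardException {
        // by convention, the RowLocation is the last column of every system index row
        RowLocation loc = (RowLocation) indexRow.getColumn(indexRow.nColumns());
        // fetch all columns of the base row; returns false if the row has vanished
        return heapCC.fetch(loc, baseRow.getRowArray(), (FormatableBitSet) null);
    }
}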