Use of org.apache.derby.iapi.sql.execute.ExecRow in the Apache Derby project.
From the class DataDictionaryImpl, method populateSYSDUMMY1.
/**
 * Populate the SYSDUMMY1 table with its single row.
 *
 * @param tc the transaction controller under which the insert runs
 *
 * @exception StandardException Standard Derby error policy
 */
protected void populateSYSDUMMY1(TransactionController tc) throws StandardException {
    TabInfoImpl ti = getNonCoreTI(SYSDUMMY1_CATALOG_NUM);
    // SYSDUMMY1's row factory ignores its descriptor arguments and always
    // builds the same single catalog row, hence the (null, null).
    ExecRow row = ti.getCatalogRowFactory().makeRow(null, null);
    // The return code is deliberately not checked: insertRow signals real
    // failures via StandardException. (The previous unused local that held
    // the return code has been removed.)
    ti.insertRow(row, tc);
}
Use of org.apache.derby.iapi.sql.execute.ExecRow in the Apache Derby project.
From the class DataDictionaryImpl, method getDescriptorViaHeap.
/**
 * Return a (single or a list of) catalog row descriptor(s) from a
 * system table, where the access is via a heap scan.
 *
 * @param columns which columns to fetch from the system
 *        table, or null to fetch all columns
 * @param scanQualifiers qualifiers restricting which rows are returned
 * @param ti The TabInfoImpl to use
 * @param parentTupleDescriptor The parent descriptor, if applicable.
 * @param list The list to build, if supplied.
 *        If null, then caller expects a single descriptor
 * @param returnType The type of descriptor to look for
 *
 * @return The last matching descriptor, or null if no row matched
 *
 * @exception StandardException Thrown on error
 */
protected <T extends TupleDescriptor> T getDescriptorViaHeap(FormatableBitSet columns, ScanQualifier[][] scanQualifiers, TabInfoImpl ti, TupleDescriptor parentTupleDescriptor, List<? super T> list, Class<T> returnType) throws StandardException {
    CatalogRowFactory rf = ti.getCatalogRowFactory();
    T td = null;
    // Get the current transaction controller
    TransactionController tc = getTransactionCompile();
    ExecRow outRow = rf.makeEmptyRow();
    /*
    ** Table scan
    */
    ScanController scanController = tc.openScan(// conglomerate to open
    ti.getHeapConglomerate(), // don't hold open across commit
    false, // for read
    0, TransactionController.MODE_TABLE, TransactionController.ISOLATION_REPEATABLE_READ, columns, // start position - first row
    (DataValueDescriptor[]) null, // startSearchOperation - none
    0, // scanQualifier,
    scanQualifiers, // stop position - through last row
    (DataValueDescriptor[]) null, // stopSearchOperation - none
    0);
    try {
        while (scanController.fetchNext(outRow.getRowArray())) {
            td = returnType.cast(rf.buildDescriptor(outRow, parentTupleDescriptor, this));
            /* If list is null, then caller only wants a single descriptor - we're done
             * else just add the current descriptor to the list.
             */
            if (list == null) {
                break;
            } else {
                list.add(td);
            }
        }
    } finally {
        // Close the scan even when fetchNext/buildDescriptor throws,
        // so the scan controller is never leaked.
        scanController.close();
    }
    return td;
}
Use of org.apache.derby.iapi.sql.execute.ExecRow in the Apache Derby project.
From the class DataDictionaryImpl, method getDescriptorViaIndexMinion.
/**
 * Core of the "fetch descriptor(s) via an index" logic: scans the given
 * index for rows matching {@code keyRow}, fetches each qualifying base row
 * from the heap, and builds a descriptor from it.
 *
 * @param indexId which index on the catalog to scan
 * @param keyRow start/stop key for the index scan (scan is GE start, GT stop)
 * @param scanQualifiers extra qualifiers applied during the scan
 * @param ti the TabInfoImpl of the catalog being read
 * @param parentTupleDescriptor parent descriptor passed to the row factory
 * @param list if non-null, all matching descriptors are appended to it;
 *        if null, the scan stops after the first match
 * @param returnType the descriptor class to cast results to
 * @param forUpdate whether to open the index scan for update
 * @param isolationLevel must be ISOLATION_REPEATABLE_READ or
 *        ISOLATION_READ_UNCOMMITTED (asserted in sane builds)
 * @param tc the transaction controller to use
 *
 * @return the last descriptor built, or null if none matched
 *
 * @exception StandardException Thrown on error
 */
private <T extends TupleDescriptor> T getDescriptorViaIndexMinion(int indexId, ExecIndexRow keyRow, ScanQualifier[][] scanQualifiers, TabInfoImpl ti, TupleDescriptor parentTupleDescriptor, List<? super T> list, Class<T> returnType, boolean forUpdate, int isolationLevel, TransactionController tc) throws StandardException {
    CatalogRowFactory rf = ti.getCatalogRowFactory();
    ExecIndexRow indexRow1;
    ExecRow outRow;
    RowLocation baseRowLocation;
    ScanController scanController = null;
    T td = null;
    if (SanityManager.DEBUG) {
        // Only these two isolation levels are supported by this code path.
        SanityManager.ASSERT(isolationLevel == TransactionController.ISOLATION_REPEATABLE_READ || isolationLevel == TransactionController.ISOLATION_READ_UNCOMMITTED);
    }
    outRow = rf.makeEmptyRow();
    ConglomerateController heapCC = tc.openConglomerate(ti.getHeapConglomerate(), false, 0, TransactionController.MODE_RECORD, isolationLevel);
    try {
        /* Scan the index and go to the data pages for qualifying rows to
         * build the column descriptor.
         */
        scanController = tc.openScan(// conglomerate to open
        ti.getIndexConglomerate(indexId), // don't hold open across commit
        false, (forUpdate) ? TransactionController.OPENMODE_FORUPDATE : 0, TransactionController.MODE_RECORD, isolationLevel, // all fields as objects
        (FormatableBitSet) null, // start position - first row
        keyRow.getRowArray(), // startSearchOperation
        ScanController.GE, // scanQualifier,
        scanQualifiers, // stop position - through last row
        keyRow.getRowArray(), // stopSearchOperation
        ScanController.GT);
        while (true) {
            // create an index row template
            indexRow1 = getIndexRowFromHeapRow(ti.getIndexRowGenerator(indexId), heapCC.newRowLocationTemplate(), outRow);
            // from the table.
            if (!scanController.fetchNext(indexRow1.getRowArray())) {
                break;
            }
            // The base row's location is carried in the last index column.
            baseRowLocation = (RowLocation) indexRow1.getColumn(indexRow1.nColumns());
            // RESOLVE paulat - remove the try catch block when track 3677 is fixed
            // just leave the contents of the try block
            // adding to get more info on track 3677
            boolean base_row_exists = false;
            try {
                base_row_exists = heapCC.fetch(baseRowLocation, outRow.getRowArray(), (FormatableBitSet) null);
            } catch (RuntimeException re) {
                if (SanityManager.DEBUG) {
                    if (re instanceof AssertFailure) {
                        StringBuilder strbuf = new StringBuilder("Error retrieving base row in table " + ti.getTableName());
                        strbuf.append(": An ASSERT was thrown when trying to locate a row matching index row " + indexRow1 + " from index " + ti.getIndexName(indexId) + ", conglom number " + ti.getIndexConglomerate(indexId));
                        debugGenerateInfo(strbuf, tc, heapCC, ti, indexId);
                    }
                }
                throw re;
            } catch (StandardException se) {
                if (SanityManager.DEBUG) {
                    // do not want to catch lock timeout errors here
                    if (se.getSQLState().equals("XSRS9")) {
                        StringBuilder strbuf = new StringBuilder("Error retrieving base row in table " + ti.getTableName());
                        strbuf.append(": A StandardException was thrown when trying to locate a row matching index row " + indexRow1 + " from index " + ti.getIndexName(indexId) + ", conglom number " + ti.getIndexConglomerate(indexId));
                        debugGenerateInfo(strbuf, tc, heapCC, ti, indexId);
                    }
                }
                throw se;
            }
            if (SanityManager.DEBUG) {
                // holding scan cursor on index at ISOLATION_REPEATABLE_READ.
                if (!base_row_exists && (isolationLevel == TransactionController.ISOLATION_REPEATABLE_READ)) {
                    StringBuilder strbuf = new StringBuilder("Error retrieving base row in table " + ti.getTableName());
                    strbuf.append(": could not locate a row matching index row " + indexRow1 + " from index " + ti.getIndexName(indexId) + ", conglom number " + ti.getIndexConglomerate(indexId));
                    debugGenerateInfo(strbuf, tc, heapCC, ti, indexId);
                    // RESOLVE: for now, we are going to kill the VM
                    // to help debug this problem.
                    System.exit(1);
                    // RESOLVE: not currently reached
                    // SanityManager.THROWASSERT(strbuf.toString());
                }
            }
            if (!base_row_exists && (isolationLevel == TransactionController.ISOLATION_READ_UNCOMMITTED)) {
                // If isolationLevel == ISOLATION_READ_UNCOMMITTED we may
                // possibly see that the base row does not exist even if the
                // index row did. This mode is currently only used by
                // TableNameInfo's call to hashAllTableDescriptorsByTableId,
                // cf. DERBY-3678, and by getStatisticsDescriptors,
                // cf. DERBY-4881.
                //
                // For the former call, a table's schema descriptor is attempted
                // read, and if the base row for the schema has gone between
                // reading the index and the base table, the table that needs
                // this information has gone, too. So, the table should not
                // be needed for printing lock timeout or deadlock
                // information, so we can safely just return an empty (schema)
                // descriptor. Furthermore, neither Timeout or DeadLock
                // diagnostics access the schema of a table descriptor, so it
                // seems safe to just return an empty schema descriptor for
                // the table.
                //
                // There is a theoretical chance another row may have taken
                // the first one's place, but only if a compress of the base
                // table managed to run between the time we read the index and
                // the base row, which seems unlikely so we ignore that.
                //
                // Even the index row may be gone in the above use case, of
                // course, and that case also returns an empty descriptor
                // since no match is found.
                td = null;
            } else {
                // normal case
                td = returnType.cast(rf.buildDescriptor(outRow, parentTupleDescriptor, this));
            }
            /* If list is null, then caller only wants a single descriptor - we're done
             * else just add the current descriptor to the list.
             */
            if (list == null) {
                break;
            } else if (td != null) {
                list.add(td);
            }
        }
    } finally {
        // Close both controllers even when an exception is rethrown above,
        // so neither the scan nor the heap conglomerate is leaked.
        if (scanController != null) {
            scanController.close();
        }
        heapCC.close();
    }
    return td;
}
Use of org.apache.derby.iapi.sql.execute.ExecRow in the Apache Derby project.
From the class DataDictionaryImpl, method updateLockGranularity.
/**
 * Update the lockGranularity for the specified table's row in SYSTABLES.
 *
 * @param td The TableDescriptor for the table
 * @param schema The SchemaDescriptor for the table
 * @param lockGranularity The new lockGranularity
 * @param tc The TransactionController to use.
 *
 * @exception StandardException Thrown on error
 */
public void updateLockGranularity(TableDescriptor td, SchemaDescriptor schema, char lockGranularity, TransactionController tc) throws StandardException {
    TabInfoImpl ti = coreInfo[SYSTABLES_CORE_NUM];
    SYSTABLESRowFactory rf = (SYSTABLESRowFactory) ti.getCatalogRowFactory();
    /* Use the table name and schema ID in both start
     * and stop position for the index 1 scan.
     */
    DataValueDescriptor tableNameOrderable = new SQLVarchar(td.getName());
    DataValueDescriptor schemaIDOrderable = getIDValueAsCHAR(schema.getUUID());
    /* Set up the start/stop position for the scan */
    ExecIndexRow keyRow1 = exFactory.getIndexableRow(2);
    keyRow1.setColumn(1, tableNameOrderable);
    keyRow1.setColumn(2, schemaIDOrderable);
    // Build the replacement row to be stuffed into SYSTABLES.
    // NOTE(review): the lockGranularity parameter is not referenced here;
    // presumably the row factory picks the new granularity up from td,
    // which callers must have updated already — TODO confirm.
    ExecRow row = rf.makeRow(td, schema);
    // No indexed columns change, so no index maintenance is needed.
    // A freshly allocated boolean[] is already all-false in Java, so the
    // former explicit initialization loop was redundant and is removed.
    boolean[] bArray = new boolean[2];
    ti.updateRow(keyRow1, row, SYSTABLESRowFactory.SYSTABLES_INDEX1_ID, bArray, (int[]) null, tc);
}
Use of org.apache.derby.iapi.sql.execute.ExecRow in the Apache Derby project.
From the class DataDictionaryImpl, method getAllDependencyDescriptorsList.
/**
 * Build and return a List with DependencyDescriptors for
 * all of the stored dependencies.
 * This is useful for consistency checking.
 *
 * @return List of all DependencyDescriptors.
 *
 * @exception StandardException Thrown on failure
 */
public List<TupleDescriptor> getAllDependencyDescriptorsList() throws StandardException {
    List<TupleDescriptor> ddl = newSList();
    TabInfoImpl ti = getNonCoreTI(SYSDEPENDS_CATALOG_NUM);
    SYSDEPENDSRowFactory rf = (SYSDEPENDSRowFactory) ti.getCatalogRowFactory();
    // Get the current transaction controller
    TransactionController tc = getTransactionCompile();
    ExecRow outRow = rf.makeEmptyRow();
    // Full heap scan over SYSDEPENDS: no start/stop keys, no qualifiers.
    ScanController scanController = tc.openScan(// conglomerate to open
    ti.getHeapConglomerate(), // don't hold open across commit
    false, // for read
    0, // scans entire table.
    TransactionController.MODE_TABLE, TransactionController.ISOLATION_REPEATABLE_READ, // all fields as objects
    (FormatableBitSet) null, // start position - first row
    null, // startSearchOperation
    ScanController.GE, null, // stop position - through last row
    null, // stopSearchOperation
    ScanController.GT);
    try {
        while (scanController.fetchNext(outRow.getRowArray())) {
            DependencyDescriptor dependencyDescriptor = (DependencyDescriptor) rf.buildDescriptor(outRow, (TupleDescriptor) null, this);
            ddl.add(dependencyDescriptor);
        }
    } finally {
        // Close the scan even if buildDescriptor throws, so the scan
        // controller is never leaked.
        scanController.close();
    }
    return ddl;
}
Aggregations