Use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
Class SortResultSet, method getRowLocation.
///////////////////////////////////////////////////////////////////////////////
//
// CursorResultSet interface
//
///////////////////////////////////////////////////////////////////////////////
/**
 * This result set gets its row location from
 * the last fetch done. If the cursor is closed,
 * null is returned.
 *
 * @see CursorResultSet
 *
 * @return the row location of the current cursor row.
 * @exception StandardException thrown on failure to get the row location
 */
public RowLocation getRowLocation() throws StandardException {
    if (!isOpen)
        return null;
    // REVISIT: could we reuse the same RowLocation object
    // across several calls?
    RowLocation rl = scanController.newRowLocationTemplate();
    scanController.fetchLocation(rl);
    return rl;
}
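The REVISIT comment above asks whether the same RowLocation object could be reused across calls. A minimal sketch of that idea, assuming fetchLocation() overwrites the destination template in place (the cachedRowLocation field is hypothetical, not part of SortResultSet):

// Hypothetical caching variant; not Derby's actual code.
private RowLocation cachedRowLocation;

public RowLocation getRowLocation() throws StandardException {
    if (!isOpen)
        return null;
    if (cachedRowLocation == null) {
        // Allocate the template once and refill it on every call.
        cachedRowLocation = scanController.newRowLocationTemplate();
    }
    scanController.fetchLocation(cachedRowLocation);
    return cachedRowLocation;
}

The trade-off is that a caller holding on to a previously returned RowLocation would see it change on the next fetch, which is presumably why the original allocates a fresh template per call.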
Use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
Class T_ConsistencyChecker, method nullFirstHeapRow.
/**
 * Set all of the columns in the first row from
 * the heap to null, without
 * updating the indexes on the table.
 *
 * @param schemaName The schema name.
 * @param tableName The table name.
 *
 * @exception StandardException Thrown on error
 */
public static void nullFirstHeapRow(String schemaName, String tableName) throws StandardException {
    T_ConsistencyChecker t_cc = new T_ConsistencyChecker(schemaName, tableName, null);
    t_cc.getContexts();
    t_cc.getDescriptors();
    /* Open a scan on the heap */
    ScanController heapScan = t_cc.openUnqualifiedHeapScan();
    // Move to the 1st row in the heap
    heapScan.next();
    // Get the RowLocation of the current row
    RowLocation baseRL = heapScan.newRowLocationTemplate();
    heapScan.fetchLocation(baseRL);
    // Replace the current row with nulls
    heapScan.replace(t_cc.getHeapRowOfNulls().getRowArray(), (FormatableBitSet) null);
    heapScan.close();
}
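nullFirstHeapRow deliberately de-synchronizes the heap from its indexes so that consistency checking can be exercised. A sketch of a test around it, assuming the method can be invoked from the test environment (in Derby's own tests such helpers run inside the engine); the URL and table names are placeholders, while SYSCS_UTIL.SYSCS_CHECK_TABLE is Derby's consistency-check function:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class NullFirstHeapRowSketch {
    public static void main(String[] args) throws Exception {
        // "testdb", "APP" and "T1" are made-up names; T1 must be populated
        // and have at least one index for the corruption to be detectable.
        Connection conn = DriverManager.getConnection("jdbc:derby:testdb");
        T_ConsistencyChecker.nullFirstHeapRow("APP", "T1");
        try (Statement s = conn.createStatement()) {
            // SYSCS_CHECK_TABLE compares every index against the heap, so the
            // nulled (but still indexed) heap row should now be reported.
            s.executeQuery("VALUES SYSCS_UTIL.SYSCS_CHECK_TABLE('APP', 'T1')").next();
            System.out.println("unexpected: table still looks consistent");
        } catch (SQLException expected) {
            System.out.println("inconsistency detected: " + expected.getMessage());
        }
    }
}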
Use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
Class T_ConsistencyChecker, method insertBadRowLocation.
/**
 * Get the first row from the heap and insert it into
 * the specified index, with a bad row location, without
 * inserting it into the heap or the other indexes on the table.
 *
 * @param schemaName The schema name.
 * @param tableName The table name.
 * @param indexName The specified index.
 *
 * @exception StandardException Thrown on error
 */
public static void insertBadRowLocation(String schemaName, String tableName, String indexName) throws StandardException {
    T_ConsistencyChecker t_cc = new T_ConsistencyChecker(schemaName, tableName, indexName);
    t_cc.getContexts();
    t_cc.getDescriptors();
    /* Open a scan on the heap */
    ScanController heapScan = t_cc.openUnqualifiedHeapScan();
    // Get RowLocation templates; badRL is never positioned on a row,
    // so it does not point at any row in the heap
    RowLocation baseRL = heapScan.newRowLocationTemplate();
    RowLocation badRL = heapScan.newRowLocationTemplate();
    heapScan.close();
    /* Open a scan on the index */
    ExecRow indexRow = t_cc.getIndexTemplateRow(baseRL);
    ScanController indexScan = t_cc.openUnqualifiedIndexScan();
    // Move to the 1st row in the index
    indexScan.next();
    // Fetch the 1st row
    indexScan.fetch(indexRow.getRowArray());
    indexScan.close();
    // Insert another copy of the 1st row into the index with a bad row location
    int keyLength = t_cc.getIndexDescriptor().getIndexDescriptor().baseColumnPositions().length;
    indexRow.setColumn(keyLength + 1, badRL);
    ConglomerateController indexCC = t_cc.openIndexCC();
    indexCC.insert(indexRow.getRowArray());
    indexCC.close();
}
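The setColumn(keyLength + 1, badRL) call relies on the layout of a Derby index row: the key columns come first and the base row's RowLocation is stored as the last column, with ExecRow columns numbered from 1. A hypothetical helper that names the idiom:

// Hypothetical helper, not part of T_ConsistencyChecker: overwrite the
// RowLocation column, which always follows the key columns in an index row.
static void setIndexRowLocation(ExecRow indexRow, int keyLength, RowLocation rl) {
    // ExecRow.setColumn() is 1-based, so the location is column keyLength + 1.
    indexRow.setColumn(keyLength + 1, rl);
}

Because badRL was created as a fresh template and never positioned by fetchLocation(), the inserted index entry points at no real heap row, which is exactly the corruption this test method wants to produce.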
Use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
Class AlterTableConstantAction, method defragmentRows.
/**
 * Defragment rows in the given table.
 * <p>
 * Scans the rows at the end of a table and moves them to free spots
 * towards the beginning of the table. In the same transaction all
 * associated indexes are updated to reflect the new location of the
 * base table row.
 * <p>
 * After a defragment pass, when possible, there will be a set of
 * empty pages at the end of the table which can be returned to the
 * operating system by calling truncateEnd(). The allocation bit
 * maps will be set so that new inserts will tend to go to empty and
 * half-filled pages starting from the front of the conglomerate.
 *
 * @param tc transaction controller to use to do updates.
 */
private void defragmentRows(TransactionController tc) throws StandardException {
    GroupFetchScanController base_group_fetch_cc = null;
    int num_indexes = 0;
    int[][] index_col_map = null;
    ScanController[] index_scan = null;
    ConglomerateController[] index_cc = null;
    DataValueDescriptor[][] index_row = null;
    TransactionController nested_tc = null;
    try {
        nested_tc = tc.startNestedUserTransaction(false, true);
        switch (td.getTableType()) {
            /* Skip views and vti tables */
            case TableDescriptor.VIEW_TYPE:
            case TableDescriptor.VTI_TYPE:
                return;
            // DERBY-719,DERBY-720
            default:
                break;
        }
        /* Get a row template for the base table */
        ExecRow br = lcc.getLanguageConnectionFactory().getExecutionFactory().getValueRow(td.getNumberOfColumns());
        /* Fill the row with nulls of the correct type */
        for (ColumnDescriptor cd : td.getColumnDescriptorList()) {
            br.setColumn(cd.getPosition(), cd.getType().getNull());
        }
        DataValueDescriptor[][] row_array = new DataValueDescriptor[100][];
        row_array[0] = br.getRowArray();
        RowLocation[] old_row_location_array = new RowLocation[100];
        RowLocation[] new_row_location_array = new RowLocation[100];
        // Create the following arrays, which will be used to update
        // each index as the scan moves rows about the heap as part of
        // the compress:
        //   index_col_map - maps the location of index columns in the base
        //                   row, i.e. index_col_map[0] is the column offset
        //                   of the 1st key column in the base row. All
        //                   offsets are 0-based.
        //   index_scan    - open ScanController used to delete the old index row
        //   index_cc      - open ConglomerateController used to insert the new row
        //   index_row     - template used to build each index row
        ConglomerateDescriptor[] conglom_descriptors = td.getConglomerateDescriptors();
        // conglom_descriptors has an entry for the heap conglomerate and
        // each one of its indexes.
        num_indexes = conglom_descriptors.length - 1;
        // if indexes exist, set up data structures to update them
        if (num_indexes > 0) {
            // allocate arrays
            index_col_map = new int[num_indexes][];
            index_scan = new ScanController[num_indexes];
            index_cc = new ConglomerateController[num_indexes];
            index_row = new DataValueDescriptor[num_indexes][];
            setup_indexes(nested_tc, td, index_col_map, index_scan, index_cc, index_row);
        }
        /* Open the heap for reading */
        base_group_fetch_cc = nested_tc.defragmentConglomerate(
                td.getHeapConglomerateId(),
                false,
                true,
                TransactionController.OPENMODE_FORUPDATE,
                TransactionController.MODE_TABLE,
                TransactionController.ISOLATION_SERIALIZABLE);
        int num_rows_fetched;
        while ((num_rows_fetched = base_group_fetch_cc.fetchNextGroup(
                    row_array, old_row_location_array, new_row_location_array)) != 0) {
            if (num_indexes > 0) {
                for (int row = 0; row < num_rows_fetched; row++) {
                    for (int index = 0; index < num_indexes; index++) {
                        fixIndex(
                            row_array[row],
                            index_row[index],
                            old_row_location_array[row],
                            new_row_location_array[row],
                            index_cc[index],
                            index_scan[index],
                            index_col_map[index]);
                    }
                }
            }
        }
        // TODO - It would be better if commits happened more frequently
        // in the nested transaction, but for that there would have to be
        // more logic to catch DDL that might jump into the middle of the
        // above loop and invalidate the various table control structures
        // which are needed to properly update the indexes. For example,
        // the above loop would corrupt an index added midway through
        // the loop if not properly handled. See DERBY-1188.
        nested_tc.commit();
    } finally {
        /* Clean up before we leave */
        if (base_group_fetch_cc != null) {
            base_group_fetch_cc.close();
            base_group_fetch_cc = null;
        }
        if (num_indexes > 0) {
            for (int i = 0; i < num_indexes; i++) {
                if (index_scan != null && index_scan[i] != null) {
                    index_scan[i].close();
                    index_scan[i] = null;
                }
                if (index_cc != null && index_cc[i] != null) {
                    index_cc[i].close();
                    index_cc[i] = null;
                }
            }
        }
        if (nested_tc != null) {
            nested_tc.destroy();
        }
    }
}
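fixIndex itself is not shown on this page. Given the arrays set up above, its job is to rebuild the index key from the moved base row via index_col_map, delete the entry carrying the old RowLocation, and insert one carrying the new RowLocation. A simplified sketch of that contract (the real method also asserts that the delete actually found a row):

// Sketch of the fixIndex contract, not the verbatim Derby method.
private static void fixIndex(DataValueDescriptor[] base_row,
                             DataValueDescriptor[] index_row,
                             RowLocation old_row_loc,
                             RowLocation new_row_loc,
                             ConglomerateController index_cc,
                             ScanController index_scan,
                             int[] index_col_map) throws StandardException {
    // Build the index row from the base row, using the column map.
    for (int i = 0; i < index_col_map.length; i++) {
        index_row[i] = base_row[index_col_map[i]];
    }
    // The RowLocation is by convention the last column of the index row.
    index_row[index_row.length - 1] = old_row_loc;
    // Position the already-open scan on exactly this key: start at the
    // full key (GE) and stop just past it (GT).
    index_scan.reopenScan(index_row, ScanController.GE, (Qualifier[][]) null,
                          index_row, ScanController.GT);
    if (index_scan.next()) {
        index_scan.delete();   // remove the entry pointing at the old location
    }
    // Re-insert the same key, now pointing at the row's new location.
    index_row[index_row.length - 1] = new_row_loc;
    index_cc.insert(index_row);
}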
Use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
Class AlterTableConstantAction, method truncateTable.
/*
 * TRUNCATE TABLE TABLENAME; (quickly removes all the rows from the table
 * and its corresponding indexes).
 * Truncate is implemented by dropping the existing conglomerates (heap,
 * indexes) and recreating new ones with the properties of the dropped
 * conglomerates. Currently Store does not have support for truncating
 * existing conglomerates; until it does, this is the only way to do it.
 * Error cases: Truncate has the same error cases as other DDL statements,
 * except:
 * 1) Truncate is not allowed when the table is referenced by another table.
 * 2) Truncate is not allowed when there are enabled DELETE triggers on
 *    the table.
 * Note: Because the conglomerate number changes during the recreate
 * process, all statements that use the table will be marked invalid and
 * will be recompiled internally on their next execution. This is okay:
 * since truncate reduces the number of rows to zero, it is a good idea to
 * recompile them because the old plans are likely no longer appropriate.
 * Recompilation is done internally by Derby; the user does not need to do
 * anything.
 */
private void truncateTable() throws StandardException {
    ExecRow emptyHeapRow;
    long newHeapConglom;
    Properties properties = new Properties();
    RowLocation rl;
    if (SanityManager.DEBUG) {
        if (lockGranularity != '\0') {
            SanityManager.THROWASSERT("lockGranularity expected to be '\0', not " + lockGranularity);
        }
        SanityManager.ASSERT(columnInfo == null, "columnInfo expected to be null");
        SanityManager.ASSERT(constraintActions == null, "constraintActions expected to be null");
    }
    // Truncate is not allowed when another table has an enabled,
    // non-self-referencing foreign key referencing this one.
    for (ConstraintDescriptor cd : dd.getConstraintDescriptors(td)) {
        if (cd instanceof ReferencedKeyConstraintDescriptor) {
            final ReferencedKeyConstraintDescriptor rfcd = (ReferencedKeyConstraintDescriptor) cd;
            for (ConstraintDescriptor fkcd : rfcd.getNonSelfReferencingFK(ConstraintDescriptor.ENABLED)) {
                final ForeignKeyConstraintDescriptor fk = (ForeignKeyConstraintDescriptor) fkcd;
                throw StandardException.newException(SQLState.LANG_NO_TRUNCATE_ON_FK_REFERENCE_TABLE, td.getName());
            }
        }
    }
    // truncate is not allowed when there are enabled DELETE triggers
    for (TriggerDescriptor trd : dd.getTriggerDescriptors(td)) {
        if (trd.listensForEvent(TriggerDescriptor.TRIGGER_EVENT_DELETE) && trd.isEnabled()) {
            throw StandardException.newException(SQLState.LANG_NO_TRUNCATE_ON_ENABLED_DELETE_TRIGGERS, td.getName(), trd.getName());
        }
    }
    // gather information from the existing conglomerate to create the new one
    emptyHeapRow = td.getEmptyExecRow();
    compressHeapCC = tc.openConglomerate(
            td.getHeapConglomerateId(),
            false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_TABLE,
            TransactionController.ISOLATION_SERIALIZABLE);
    rl = compressHeapCC.newRowLocationTemplate();
    // Get the properties on the old heap
    compressHeapCC.getInternalTablePropertySet(properties);
    compressHeapCC.close();
    compressHeapCC = null;
    // create the new conglomerate
    newHeapConglom = tc.createConglomerate(
            "heap",
            emptyHeapRow.getRowArray(),
            null,   // column sort order - not required for heap
            td.getColumnCollationIds(),
            properties,
            TransactionController.IS_DEFAULT);
    /* Set up index info to perform truncate on them */
    getAffectedIndexes();
    if (numIndexes > 0) {
        indexRows = new ExecIndexRow[numIndexes];
        ordering = new ColumnOrdering[numIndexes][];
        collation = new int[numIndexes][];
        for (int index = 0; index < numIndexes; index++) {
            IndexRowGenerator curIndex = compressIRGs[index];
            // create a single index row template for each index
            indexRows[index] = curIndex.getIndexRowTemplate();
            curIndex.getIndexRow(emptyHeapRow, rl, indexRows[index], (FormatableBitSet) null);
            /* For non-unique indexes, we order by all columns + the RID.
             * For unique indexes, we just order by the columns.
             * No need to try to enforce uniqueness here as
             * the index should be valid.
             */
            int[] baseColumnPositions = curIndex.baseColumnPositions();
            boolean[] isAscending = curIndex.isAscending();
            int numColumnOrderings = baseColumnPositions.length + 1;
            ordering[index] = new ColumnOrdering[numColumnOrderings];
            collation[index] = curIndex.getColumnCollationIds(td.getColumnDescriptorList());
            for (int ii = 0; ii < numColumnOrderings - 1; ii++) {
                ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
            }
            ordering[index][numColumnOrderings - 1] = new IndexColumnOrder(numColumnOrderings - 1);
        }
    }
    /*
    ** Inform the data dictionary that we are about to write to it.
    ** There are several calls to data dictionary "get" methods here
    ** that might be done in "read" mode in the data dictionary, but
    ** it seemed safer to do this whole operation in "write" mode.
    **
    ** We tell the data dictionary we're done writing at the end of
    ** the transaction.
    */
    dd.startWriting(lcc);
    // truncate all indexes
    if (numIndexes > 0) {
        long[] newIndexCongloms = new long[numIndexes];
        for (int index = 0; index < numIndexes; index++) {
            updateIndex(newHeapConglom, dd, index, newIndexCongloms);
        }
    }
    // Update the DataDictionary
    // Get the ConglomerateDescriptor for the heap
    long oldHeapConglom = td.getHeapConglomerateId();
    ConglomerateDescriptor cd = td.getConglomerateDescriptor(oldHeapConglom);
    // Update sys.sysconglomerates with the new conglomerate number
    dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);
    // Now that the updated information is available in the system tables,
    // we should invalidate all statements that use the old conglomerates
    dm.invalidateFor(td, DependencyManager.TRUNCATE_TABLE, lcc);
    // Drop the old conglomerate
    tc.dropConglomerate(oldHeapConglom);
    cleanUp();
}
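From the user's side, everything above is reached through a single statement. A minimal JDBC sketch (the URL and table name are placeholders); as the header comment notes, the statement fails if another table has an enabled foreign key referencing this one or if enabled DELETE triggers exist, and all cached plans against the table are invalidated because the conglomerate number changes:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class TruncateTableSketch {
    public static void main(String[] args) throws Exception {
        // "testdb" and "MYTABLE" are made-up names.
        Connection conn = DriverManager.getConnection("jdbc:derby:testdb");
        try (Statement s = conn.createStatement()) {
            // Runs AlterTableConstantAction.truncateTable() inside the engine.
            s.executeUpdate("TRUNCATE TABLE MYTABLE");
        }
        conn.close();
    }
}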