use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class TestPropertyInfo method getConglomerateProperties.
private static Properties getConglomerateProperties(String schemaName, String conglomerateName, boolean isIndex) throws java.sql.SQLException {
ConglomerateController cc;
ConglomerateDescriptor cd;
DataDictionary dd;
Properties properties;
SchemaDescriptor sd;
TableDescriptor td;
TransactionController tc;
long conglomerateNumber;
// find the language context.
LanguageConnectionContext lcc = ConnectionUtil.getCurrentLCC();
// Get the current transaction controller
tc = lcc.getTransactionExecute();
try {
// find the DataDictionary
dd = lcc.getDataDictionary();
// get the SchemaDescriptor
sd = dd.getSchemaDescriptor(schemaName, tc, true);
if (!isIndex) {
// get the TableDescriptor for the table
td = dd.getTableDescriptor(conglomerateName, sd, tc);
// Return an empty Properties if table does not exist or if it is for a view.
if ((td == null) || td.getTableType() == TableDescriptor.VIEW_TYPE) {
return new Properties();
}
conglomerateNumber = td.getHeapConglomerateId();
} else {
// get the ConglomerateDescriptor for the index
cd = dd.getConglomerateDescriptor(conglomerateName, sd, false);
// Return an empty Properties if index does not exist
if (cd == null) {
return new Properties();
}
conglomerateNumber = cd.getConglomerateNumber();
}
cc = tc.openConglomerate(conglomerateNumber, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
properties = cc.getInternalTablePropertySet(new Properties());
cc.close();
} catch (StandardException se) {
throw PublicAPI.wrapStandardException(se);
}
return properties;
}
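A minimal illustrative caller for the helper above (it must live in the same class, since the method is private; the schema and table names are made up for the example):

Properties p = getConglomerateProperties("APP", "EMPLOYEES", false /* isIndex */);
for (String key : p.stringPropertyNames()) {
    System.out.println(key + "=" + p.getProperty(key));
}

For an existing table this prints the store-level properties of its heap conglomerate; for a missing table or a view it prints nothing, since the helper returns an empty Properties.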
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class AlterTableConstantAction method defragmentRows.
/**
* Defragment rows in the given table.
* <p>
* Scans the rows at the end of a table and moves them to free spots
* towards the beginning of the table. In the same transaction all
* associated indexes are updated to reflect the new location of the
* base table row.
* <p>
* After a defragment pass, if one was possible, there will be a set of
* empty pages at the end of the table which can be returned to the
* operating system by calling truncateEnd(). The allocation bit
* maps will be set so that new inserts will tend to go to empty and
* half filled pages starting from the front of the conglomerate.
*
* @param tc transaction controller to use to do updates.
*/
private void defragmentRows(TransactionController tc) throws StandardException {
GroupFetchScanController base_group_fetch_cc = null;
int num_indexes = 0;
int[][] index_col_map = null;
ScanController[] index_scan = null;
ConglomerateController[] index_cc = null;
DataValueDescriptor[][] index_row = null;
TransactionController nested_tc = null;
try {
nested_tc = tc.startNestedUserTransaction(false, true);
switch(td.getTableType()) {
/* Skip views and vti tables */
case TableDescriptor.VIEW_TYPE:
case TableDescriptor.VTI_TYPE:
return;
// DERBY-719,DERBY-720
default:
break;
}
/* Get a row template for the base table */
ExecRow br = lcc.getLanguageConnectionFactory().getExecutionFactory().getValueRow(td.getNumberOfColumns());
/* Fill the row with nulls of the correct type */
for (ColumnDescriptor cd : td.getColumnDescriptorList()) {
br.setColumn(cd.getPosition(), cd.getType().getNull());
}
DataValueDescriptor[][] row_array = new DataValueDescriptor[100][];
row_array[0] = br.getRowArray();
RowLocation[] old_row_location_array = new RowLocation[100];
RowLocation[] new_row_location_array = new RowLocation[100];
// Create the following 3 arrays which will be used to update
// each index as the scan moves rows about the heap as part of
// the compress:
// index_col_map - map location of index cols in the base row,
// ie. index_col_map[0] is column offset of 1st
// key column in base row. All offsets are 0
// based.
// index_scan - open ScanController used to delete old index row
// index_cc - open ConglomerateController used to insert new
// row
ConglomerateDescriptor[] conglom_descriptors = td.getConglomerateDescriptors();
// conglom_descriptors has an entry for the base table's heap
// conglomerate and each of its indexes.
num_indexes = conglom_descriptors.length - 1;
// if indexes exist, set up data structures to update them
if (num_indexes > 0) {
// allocate arrays
index_col_map = new int[num_indexes][];
index_scan = new ScanController[num_indexes];
index_cc = new ConglomerateController[num_indexes];
index_row = new DataValueDescriptor[num_indexes][];
setup_indexes(nested_tc, td, index_col_map, index_scan, index_cc, index_row);
}
/* Open the heap for reading */
base_group_fetch_cc = nested_tc.defragmentConglomerate(td.getHeapConglomerateId(), false, true, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
int num_rows_fetched;
while ((num_rows_fetched = base_group_fetch_cc.fetchNextGroup(row_array, old_row_location_array, new_row_location_array)) != 0) {
if (num_indexes > 0) {
for (int row = 0; row < num_rows_fetched; row++) {
for (int index = 0; index < num_indexes; index++) {
fixIndex(row_array[row], index_row[index], old_row_location_array[row], new_row_location_array[row], index_cc[index], index_scan[index], index_col_map[index]);
}
}
}
}
// TODO - It would be better if commits happened more frequently
// in the nested transaction, but to do that there has to be more
// logic to catch a ddl that might jump in the middle of the
// above loop and invalidate the various table control structures
// which are needed to properly update the indexes. For example
// the above loop would corrupt an index added midway through
// the loop if not properly handled. See DERBY-1188.
nested_tc.commit();
} finally {
/* Clean up before we leave */
if (base_group_fetch_cc != null) {
base_group_fetch_cc.close();
base_group_fetch_cc = null;
}
if (num_indexes > 0) {
for (int i = 0; i < num_indexes; i++) {
if (index_scan != null && index_scan[i] != null) {
index_scan[i].close();
index_scan[i] = null;
}
if (index_cc != null && index_cc[i] != null) {
index_cc[i].close();
index_cc[i] = null;
}
}
}
if (nested_tc != null) {
nested_tc.destroy();
}
}
}
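fixIndex, called from the loop above, is not part of this excerpt. The following sketch reconstructs what it plausibly does from the surrounding comments (build the index key from the base row via index_col_map, delete the old entry through the pre-opened scan, then insert an entry carrying the new RowLocation); the actual Derby implementation may differ in details:

private static void fixIndex(
        DataValueDescriptor[] base_row,
        DataValueDescriptor[] index_row,
        RowLocation old_row_loc,
        RowLocation new_row_loc,
        ConglomerateController index_cc,
        ScanController index_scan,
        int[] index_col_map) throws StandardException {
    // Build the index key from the base row using the 0-based column map.
    for (int i = 0; i < index_col_map.length; i++) {
        index_row[i] = base_row[index_col_map[i]];
    }
    // The trailing index column is the base row's location.
    index_row[index_row.length - 1] = old_row_loc;
    // Reposition the already-open scan on the full old key and delete
    // the old index entry.
    index_scan.reopenScan(
            index_row, ScanController.GE, (Qualifier[]) null,
            index_row, ScanController.GT);
    if (index_scan.next()) {
        index_scan.delete();
    }
    // Insert a replacement entry that points at the row's new location.
    index_row[index_row.length - 1] = new_row_loc;
    index_cc.insert(index_row);
}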
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class AlterTableConstantAction method truncateTable.
/*
 * TRUNCATE TABLE TABLENAME; (quickly removes all the rows from the table
 * and its corresponding indexes).
 * Truncate is implemented by dropping the existing conglomerates (heap,
 * indexes) and recreating new ones with the properties of the dropped
 * conglomerates. The store does not currently support truncating existing
 * conglomerates; until it does, this is the only way to implement truncate.
 * Error cases: the same as for other DDL statements, except that
 * 1) Truncate is not allowed when the table is referenced by another table.
 * 2) Truncate is not allowed when there are enabled DELETE triggers on
 *    the table.
 * Note: because the conglomerate numbers change during recreation, all
 * statements that depend on them are marked invalid and will be recompiled
 * internally on their next execution. This is acceptable: truncate brings
 * the row count to zero, so the old plans are likely to be wrong anyway
 * and recompiling them is a good idea. Recompilation is done internally
 * by Derby and requires no user action.
 */
private void truncateTable() throws StandardException {
ExecRow emptyHeapRow;
long newHeapConglom;
Properties properties = new Properties();
RowLocation rl;
if (SanityManager.DEBUG) {
if (lockGranularity != '\0') {
SanityManager.THROWASSERT("lockGranularity expected to be '\0', not " + lockGranularity);
}
SanityManager.ASSERT(columnInfo == null, "columnInfo expected to be null");
SanityManager.ASSERT(constraintActions == null, "constraintActions expected to be null");
}
// Truncate is not allowed when the table is referenced by an enabled
// foreign key constraint on another table (self-referencing foreign
// keys are permitted).
for (ConstraintDescriptor cd : dd.getConstraintDescriptors(td)) {
if (cd instanceof ReferencedKeyConstraintDescriptor) {
final ReferencedKeyConstraintDescriptor rfcd = (ReferencedKeyConstraintDescriptor) cd;
for (ConstraintDescriptor fkcd : rfcd.getNonSelfReferencingFK(ConstraintDescriptor.ENABLED)) {
final ForeignKeyConstraintDescriptor fk = (ForeignKeyConstraintDescriptor) fkcd;
throw StandardException.newException(SQLState.LANG_NO_TRUNCATE_ON_FK_REFERENCE_TABLE, td.getName());
}
}
}
// truncate is not allowed when there are enabled DELETE triggers
for (TriggerDescriptor trd : dd.getTriggerDescriptors(td)) {
if (trd.listensForEvent(TriggerDescriptor.TRIGGER_EVENT_DELETE) && trd.isEnabled()) {
throw StandardException.newException(SQLState.LANG_NO_TRUNCATE_ON_ENABLED_DELETE_TRIGGERS, td.getName(), trd.getName());
}
}
// gather information from the existing conglomerate to create new one.
emptyHeapRow = td.getEmptyExecRow();
compressHeapCC = tc.openConglomerate(td.getHeapConglomerateId(), false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
rl = compressHeapCC.newRowLocationTemplate();
// Get the properties on the old heap
compressHeapCC.getInternalTablePropertySet(properties);
compressHeapCC.close();
compressHeapCC = null;
// create new conglomerate
newHeapConglom = tc.createConglomerate(
        "heap",
        emptyHeapRow.getRowArray(),
        null,  // column sort order - not required for heap
        td.getColumnCollationIds(),
        properties,
        TransactionController.IS_DEFAULT);
/* Set up index info to perform truncate on them*/
getAffectedIndexes();
if (numIndexes > 0) {
indexRows = new ExecIndexRow[numIndexes];
ordering = new ColumnOrdering[numIndexes][];
collation = new int[numIndexes][];
for (int index = 0; index < numIndexes; index++) {
IndexRowGenerator curIndex = compressIRGs[index];
// create a single index row template for each index
indexRows[index] = curIndex.getIndexRowTemplate();
curIndex.getIndexRow(emptyHeapRow, rl, indexRows[index], (FormatableBitSet) null);
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* No need to try to enforce uniqueness here as
* index should be valid.
*/
int[] baseColumnPositions = curIndex.baseColumnPositions();
boolean[] isAscending = curIndex.isAscending();
int numColumnOrderings = baseColumnPositions.length + 1;
ordering[index] = new ColumnOrdering[numColumnOrderings];
collation[index] = curIndex.getColumnCollationIds(td.getColumnDescriptorList());
for (int ii = 0; ii < numColumnOrderings - 1; ii++) {
ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
}
ordering[index][numColumnOrderings - 1] = new IndexColumnOrder(numColumnOrderings - 1);
}
}
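/*
 * Illustration: although the comment above distinguishes unique from
 * non-unique indexes, this truncate path always appends the RID as a
 * trailing ordering column (numColumnOrderings = key columns + 1).
 * For a two-column index, the orderings are (0, as declared),
 * (1, as declared), (2, the RID); the RID makes otherwise-equal keys
 * distinct, and the recreated index is valid either way because the
 * table is empty after truncation.
 */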
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
// truncate all indexes
if (numIndexes > 0) {
long[] newIndexCongloms = new long[numIndexes];
for (int index = 0; index < numIndexes; index++) {
updateIndex(newHeapConglom, dd, index, newIndexCongloms);
}
}
// Update the DataDictionary
// Get the ConglomerateDescriptor for the heap
long oldHeapConglom = td.getHeapConglomerateId();
ConglomerateDescriptor cd = td.getConglomerateDescriptor(oldHeapConglom);
// Update sys.sysconglomerates with new conglomerate #
dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);
// Now that the updated information is available in the system tables,
// we should invalidate all statements that use the old conglomerates
dm.invalidateFor(td, DependencyManager.TRUNCATE_TABLE, lcc);
// Drop the old conglomerate
tc.dropConglomerate(oldHeapConglom);
cleanUp();
}
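At the SQL level, the two error checks above are the user-visible behavior of this action. A hedged JDBC sketch (the database URL and table name are made up):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class TruncateDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection c = DriverManager.getConnection("jdbc:derby:demoDB");
             Statement s = c.createStatement()) {
            // Succeeds only if no other table references ORDERS through an
            // enabled foreign key and ORDERS has no enabled DELETE trigger;
            // afterwards the heap and index conglomerates have new numbers.
            s.executeUpdate("TRUNCATE TABLE APP.ORDERS");
        }
    }
}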
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class AlterTableConstantAction method setup_indexes.
private static void setup_indexes(TransactionController tc, TableDescriptor td, int[][] index_col_map, ScanController[] index_scan, ConglomerateController[] index_cc, DataValueDescriptor[][] index_row) throws StandardException {
// Initialize the following 3 arrays which will be used to update
// each index as the scan moves rows about the heap as part of
// the compress:
// index_col_map - map location of index cols in the base row, ie.
// index_col_map[0] is column offset of 1st key
// column in base row. All offsets are 0 based.
// index_scan - open ScanController used to delete old index row
// index_cc - open ConglomerateController used to insert new row
ConglomerateDescriptor[] conglom_descriptors = td.getConglomerateDescriptors();
int index_idx = 0;
for (int cd_idx = 0; cd_idx < conglom_descriptors.length; cd_idx++) {
ConglomerateDescriptor index_cd = conglom_descriptors[cd_idx];
if (!index_cd.isIndex()) {
// skip the heap descriptor entry
continue;
}
// ScanControllers are used to delete old index row
index_scan[index_idx] = tc.openScan(
        index_cd.getConglomerateNumber(),
        true,  // hold
        TransactionController.OPENMODE_FORUPDATE,
        TransactionController.MODE_TABLE,
        TransactionController.ISOLATION_SERIALIZABLE,
        null,  // full row is retrieved
        null,  // startKeyValue - will be reset with reopenScan()
        0,     // startSearchOperator
        null,  // qualifier
        null,  // stopKeyValue - will be reset with reopenScan()
        0);    // stopSearchOperator
// ConglomerateControllers are used to insert new index row
index_cc[index_idx] = tc.openConglomerate(
        index_cd.getConglomerateNumber(),
        true,  // hold
        TransactionController.OPENMODE_FORUPDATE,
        TransactionController.MODE_TABLE,
        TransactionController.ISOLATION_SERIALIZABLE);
// build column map to allow index row to be built from base row
int[] baseColumnPositions = index_cd.getIndexDescriptor().baseColumnPositions();
int[] zero_based_map = new int[baseColumnPositions.length];
for (int i = 0; i < baseColumnPositions.length; i++) {
zero_based_map[i] = baseColumnPositions[i] - 1;
}
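// For example, an index declared on base columns (3, 1) yields
// zero_based_map = {2, 0}.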
index_col_map[index_idx] = zero_based_map;
// build row array to delete from index and insert into index
// length is length of column map + 1 for RowLocation.
index_row[index_idx] = new DataValueDescriptor[baseColumnPositions.length + 1];
index_idx++;
}
}
use of org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor in project derby by apache.
the class AlterTableConstantAction method getAffectedIndexes.
/**
* Get info on the indexes on the table being compressed.
*
* @exception StandardException Thrown on error
*/
private void getAffectedIndexes() throws StandardException {
IndexLister indexLister = td.getIndexLister();
/* We have to get the non-distinct index row generators and conglomerate
 * numbers here and compress them to distinct ones later, because drop
 * column needs to change the index descriptor directly on each index
 * entry in SYSCONGLOMERATES, including duplicate indexes.
 */
compressIRGs = indexLister.getIndexRowGenerators();
numIndexes = compressIRGs.length;
indexConglomerateNumbers = indexLister.getIndexConglomerateNumbers();
// neither compress nor truncate, so this must be drop column
if (!(compressTable || truncateTable)) {
ArrayList<ConstantAction> newCongloms = new ArrayList<ConstantAction>();
for (int i = 0; i < compressIRGs.length; i++) {
int[] baseColumnPositions = compressIRGs[i].baseColumnPositions();
int j;
for (j = 0; j < baseColumnPositions.length; j++) {
    if (baseColumnPositions[j] == droppedColumnPosition)
        break;
}
if (j == baseColumnPositions.length) {
    // this index does not reference the dropped column
    continue;
}
if (baseColumnPositions.length == 1 || (behavior == StatementType.DROP_CASCADE && compressIRGs[i].isUnique())) {
numIndexes--;
/* Get the first conglomerate descriptor with this conglomerate number
 * each time; the duplicates will all eventually be dropped.
 */
ConglomerateDescriptor cd = td.getConglomerateDescriptor(indexConglomerateNumbers[i]);
dropConglomerate(cd, td, true, newCongloms, activation, activation.getLanguageConnectionContext());
// mark it
compressIRGs[i] = null;
continue;
}
// Give an error if a unique index that is not backing a constraint
// depends on the dropped column (constraints have already been handled).
if (compressIRGs[i].isUnique()) {
ConglomerateDescriptor cd = td.getConglomerateDescriptor(indexConglomerateNumbers[i]);
throw StandardException.newException(SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT, dm.getActionString(DependencyManager.DROP_COLUMN), columnInfo[0].name, "UNIQUE INDEX", cd.getConglomerateName());
}
}
/* If there are new backing conglomerates which must be
* created to replace a dropped shared conglomerate
* (where the shared conglomerate was dropped as part
* of a "drop conglomerate" call above), then create
* them now. We do this *after* dropping all dependent
* conglomerates because we don't want to waste time
* creating a new conglomerate if it's just going to be
* dropped again as part of another "drop conglomerate"
* call.
*/
createNewBackingCongloms(newCongloms, indexConglomerateNumbers);
IndexRowGenerator[] newIRGs = new IndexRowGenerator[numIndexes];
long[] newIndexConglomNumbers = new long[numIndexes];
collation = new int[numIndexes][];
for (int i = 0, j = 0; i < numIndexes; i++, j++) {
while (compressIRGs[j] == null) j++;
// Setup collation id array to be passed in on call to create index.
collation[i] = compressIRGs[j].getColumnCollationIds(td.getColumnDescriptorList());
int[] baseColumnPositions = compressIRGs[j].baseColumnPositions();
newIRGs[i] = compressIRGs[j];
newIndexConglomNumbers[i] = indexConglomerateNumbers[j];
boolean[] isAscending = compressIRGs[j].isAscending();
boolean reMakeArrays = false;
boolean rewriteBaseColumnPositions = false;
int size = baseColumnPositions.length;
for (int k = 0; k < size; k++) {
if (baseColumnPositions[k] > droppedColumnPosition) {
baseColumnPositions[k]--;
rewriteBaseColumnPositions = true;
} else if (baseColumnPositions[k] == droppedColumnPosition) {
// mark it
baseColumnPositions[k] = 0;
reMakeArrays = true;
}
}
if (rewriteBaseColumnPositions) {
compressIRGs[j].setBaseColumnPositions(baseColumnPositions);
}
if (reMakeArrays) {
size--;
int[] newBCP = new int[size];
boolean[] newIsAscending = new boolean[size];
int[] newCollation = new int[collation[i].length - 1];
for (int k = 0, step = 0; k < size; k++) {
if (step == 0 && baseColumnPositions[k + step] == 0)
step++;
newBCP[k] = baseColumnPositions[k + step];
newIsAscending[k] = isAscending[k + step];
newCollation[k] = collation[i][k + step];
}
IndexDescriptor id = compressIRGs[j].getIndexDescriptor();
id.setBaseColumnPositions(newBCP);
id.setIsAscending(newIsAscending);
id.setNumberOfOrderedColumns(id.numberOfOrderedColumns() - 1);
collation[i] = newCollation;
}
}
compressIRGs = newIRGs;
indexConglomerateNumbers = newIndexConglomNumbers;
} else {
collation = new int[numIndexes][];
for (int i = 0; i < numIndexes; i++) {
collation[i] = compressIRGs[i].getColumnCollationIds(td.getColumnDescriptorList());
}
}
/* Now we are done with updating each index descriptor entry directly
* in SYSCONGLOMERATES (for duplicate index as well), from now on, our
* work should apply ONLY once for each real conglomerate, so we
* compress any duplicate indexes now.
*/
Object[] compressIndexResult = compressIndexArrays(indexConglomerateNumbers, compressIRGs);
if (compressIndexResult != null) {
indexConglomerateNumbers = (long[]) compressIndexResult[1];
compressIRGs = (IndexRowGenerator[]) compressIndexResult[2];
numIndexes = indexConglomerateNumbers.length;
}
indexedCols = new FormatableBitSet(compressTable || truncateTable ? td.getNumberOfColumns() + 1 : td.getNumberOfColumns());
for (int index = 0; index < numIndexes; index++) {
int[] colIds = compressIRGs[index].getIndexDescriptor().baseColumnPositions();
for (int index2 = 0; index2 < colIds.length; index2++) {
indexedCols.set(colIds[index2]);
}
}
}
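compressIndexArrays is defined outside this excerpt. A sketch of the deduplication it is described as performing, assuming only what the caller shows: duplicate indexes share a conglomerate number, a null return means nothing needed compressing, and slots 1 and 2 of the result hold the compressed arrays (slot 0 may carry other data in the real helper; it is unused by this caller):

private static Object[] compressIndexArraysSketch(
        long[] indexCNs, IndexRowGenerator[] irgs) {
    // Mark the first occurrence of each conglomerate number.
    boolean[] keep = new boolean[indexCNs.length];
    int distinct = 0;
    for (int i = 0; i < indexCNs.length; i++) {
        boolean seen = false;
        for (int j = 0; j < i; j++) {
            if (keep[j] && indexCNs[j] == indexCNs[i]) {
                seen = true; // duplicate index backed by the same conglomerate
                break;
            }
        }
        if (!seen) {
            keep[i] = true;
            distinct++;
        }
    }
    if (distinct == indexCNs.length) {
        return null; // no duplicates, caller keeps the original arrays
    }
    long[] newCNs = new long[distinct];
    IndexRowGenerator[] newIRGs = new IndexRowGenerator[distinct];
    for (int i = 0, k = 0; i < indexCNs.length; i++) {
        if (keep[i]) {
            newCNs[k] = indexCNs[i];
            newIRGs[k] = irgs[i];
            k++;
        }
    }
    return new Object[] { null, newCNs, newIRGs };
}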