use of org.apache.derby.iapi.store.access.ConglomerateController in project derby by apache.
the class AlterTableConstantAction method defragmentRows.
/**
* Defragment rows in the given table.
* <p>
* Scans the rows at the end of a table and moves them to free spots
* towards the beginning of the table. In the same transaction all
* associated indexes are updated to reflect the new location of the
* base table row.
* <p>
* After a defragment pass, if one was possible, there will be a set of
* empty pages at the end of the table which can be returned to the
* operating system by calling truncateEnd(). The allocation bit
* maps will be set so that new inserts will tend to go to empty and
* half filled pages starting from the front of the conglomerate.
*
* @param tc transaction controller to use to do updates.
*/
private void defragmentRows(TransactionController tc) throws StandardException {
GroupFetchScanController base_group_fetch_cc = null;
int num_indexes = 0;
int[][] index_col_map = null;
ScanController[] index_scan = null;
ConglomerateController[] index_cc = null;
DataValueDescriptor[][] index_row = null;
TransactionController nested_tc = null;
try {
nested_tc = tc.startNestedUserTransaction(false, true);
switch(td.getTableType()) {
/* Skip views and vti tables */
case TableDescriptor.VIEW_TYPE:
case TableDescriptor.VTI_TYPE:
return;
// DERBY-719, DERBY-720
default:
break;
}
/* Get a row template for the base table */
ExecRow br = lcc.getLanguageConnectionFactory().getExecutionFactory().getValueRow(td.getNumberOfColumns());
/* Fill the row with nulls of the correct type */
for (ColumnDescriptor cd : td.getColumnDescriptorList()) {
br.setColumn(cd.getPosition(), cd.getType().getNull());
}
DataValueDescriptor[][] row_array = new DataValueDescriptor[100][];
row_array[0] = br.getRowArray();
RowLocation[] old_row_location_array = new RowLocation[100];
RowLocation[] new_row_location_array = new RowLocation[100];
// Create the following 3 arrays which will be used to update
// each index as the scan moves rows about the heap as part of
// the compress:
// index_col_map - map location of index cols in the base row,
// ie. index_col_map[0] is column offset of 1st
// key column in base row. All offsets are 0
// based.
// index_scan - open ScanController used to delete old index row
// index_cc - open ConglomerateController used to insert new
// row
ConglomerateDescriptor[] conglom_descriptors = td.getConglomerateDescriptors();
// conglom_descriptors has an entry for the conglomerate and each
// one of its indexes.
num_indexes = conglom_descriptors.length - 1;
// if indexes exist, set up data structures to update them
if (num_indexes > 0) {
// allocate arrays
index_col_map = new int[num_indexes][];
index_scan = new ScanController[num_indexes];
index_cc = new ConglomerateController[num_indexes];
index_row = new DataValueDescriptor[num_indexes][];
setup_indexes(nested_tc, td, index_col_map, index_scan, index_cc, index_row);
}
/* Open the heap for reading */
base_group_fetch_cc = nested_tc.defragmentConglomerate(td.getHeapConglomerateId(), false, true, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
int num_rows_fetched;
while ((num_rows_fetched = base_group_fetch_cc.fetchNextGroup(row_array, old_row_location_array, new_row_location_array)) != 0) {
if (num_indexes > 0) {
for (int row = 0; row < num_rows_fetched; row++) {
for (int index = 0; index < num_indexes; index++) {
fixIndex(row_array[row], index_row[index], old_row_location_array[row], new_row_location_array[row], index_cc[index], index_scan[index], index_col_map[index]);
}
}
}
}
// TODO - It would be better if commits happened more frequently
// in the nested transaction, but to do that there has to be more
// logic to catch a DDL statement that might execute in the middle of the
// above loop and invalidate the various table control structures
// which are needed to properly update the indexes. For example
// the above loop would corrupt an index added midway through
// the loop if not properly handled. See DERBY-1188.
nested_tc.commit();
} finally {
/* Clean up before we leave */
if (base_group_fetch_cc != null) {
base_group_fetch_cc.close();
base_group_fetch_cc = null;
}
if (num_indexes > 0) {
for (int i = 0; i < num_indexes; i++) {
if (index_scan != null && index_scan[i] != null) {
index_scan[i].close();
index_scan[i] = null;
}
if (index_cc != null && index_cc[i] != null) {
index_cc[i].close();
index_cc[i] = null;
}
}
}
if (nested_tc != null) {
nested_tc.destroy();
}
}
}
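For context, defragmentRows() above is driven by Derby's in-place compress logic. A minimal JDBC sketch of triggering a defragment pass through the SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE system procedure (the connection URL, schema, and table names are placeholder assumptions):
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;

public class CompressExample {
    public static void main(String[] args) throws Exception {
        // Placeholder URL/schema/table; adjust for your own database.
        try (Connection conn = DriverManager.getConnection("jdbc:derby:myDB")) {
            CallableStatement cs = conn.prepareCall(
                "CALL SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(?, ?, ?, ?, ?)");
            cs.setString(1, "APP");      // schema name
            cs.setString(2, "MYTABLE");  // table name
            cs.setShort(3, (short) 1);   // purge committed deleted rows
            cs.setShort(4, (short) 1);   // defragment rows
            cs.setShort(5, (short) 1);   // truncate end, freeing the emptied pages
            cs.execute();
        }
    }
}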
use of org.apache.derby.iapi.store.access.ConglomerateController in project derby by apache.
the class CreateIndexConstantAction method executeConstantAction.
// INTERFACE METHODS
/**
* This is the guts of the Execution-time logic for
* creating an index.
*
* <P>
* An index is represented as:
* <UL>
* <LI> ConglomerateDescriptor.
* </UL>
* No dependencies are created.
*
* @see ConglomerateDescriptor
* @see SchemaDescriptor
* @see ConstantAction#executeConstantAction
*
* @exception StandardException Thrown on failure
*/
public void executeConstantAction(Activation activation) throws StandardException {
TableDescriptor td;
UUID toid;
ColumnDescriptor columnDescriptor;
int[] baseColumnPositions;
IndexRowGenerator indexRowGenerator = null;
ExecRow[] baseRows;
ExecIndexRow[] indexRows;
ExecRow[] compactBaseRows;
GroupFetchScanController scan;
RowLocationRetRowSource rowSource;
long sortId;
int maxBaseColumnPosition = -1;
LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
DataDictionary dd = lcc.getDataDictionary();
DependencyManager dm = dd.getDependencyManager();
TransactionController tc = lcc.getTransactionExecute();
/*
** Inform the data dictionary that we are about to write to it.
** There are several calls to data dictionary "get" methods here
** that might be done in "read" mode in the data dictionary, but
** it seemed safer to do this whole operation in "write" mode.
**
** We tell the data dictionary we're done writing at the end of
** the transaction.
*/
dd.startWriting(lcc);
/*
** If the schema descriptor is null, then
** we must have just read ourselves in.
** So we will get the corresponding schema
** descriptor from the data dictionary.
*/
SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc, true);
/* Get the table descriptor. */
/* See if we can get the TableDescriptor
* from the Activation. (Will be there
* for backing indexes.)
*/
td = activation.getDDLTableDescriptor();
if (td == null) {
/* tableId will be non-null if adding an index to
* an existing table (as opposed to creating a
* table with a constraint with a backing index).
*/
if (tableId != null) {
td = dd.getTableDescriptor(tableId);
} else {
td = dd.getTableDescriptor(tableName, sd, tc);
}
}
if (td == null) {
throw StandardException.newException(SQLState.LANG_CREATE_INDEX_NO_TABLE, indexName, tableName);
}
if (td.getTableType() == TableDescriptor.SYSTEM_TABLE_TYPE) {
throw StandardException.newException(SQLState.LANG_CREATE_SYSTEM_INDEX_ATTEMPTED, indexName, tableName);
}
/* Get a shared table lock on the table. We need to lock table before
* invalidate dependents, otherwise, we may interfere with the
* compilation/re-compilation of DML/DDL. See beetle 4325 and $WS/
* docs/language/SolutionsToConcurrencyIssues.txt (point f).
*/
lockTableForDDL(tc, td.getHeapConglomerateId(), false);
// Invalidate any prepared statements that depended on this table (including this one)
if (!forCreateTable) {
dm.invalidateFor(td, DependencyManager.CREATE_INDEX, lcc);
}
// Translate the base column names to column positions
baseColumnPositions = new int[columnNames.length];
for (int i = 0; i < columnNames.length; i++) {
// Look up the column in the data dictionary
columnDescriptor = td.getColumnDescriptor(columnNames[i]);
if (columnDescriptor == null) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, columnNames[i], tableName);
}
TypeId typeId = columnDescriptor.getType().getTypeId();
// Don't allow a column to be created on a non-orderable type
ClassFactory cf = lcc.getLanguageConnectionFactory().getClassFactory();
boolean isIndexable = typeId.orderable(cf);
if (isIndexable && typeId.userType()) {
String userClass = typeId.getCorrespondingJavaTypeName();
// Don't allow indexes to be created on classes that are loaded
// from the database. This is because recovery won't be able to
// see the class and it will need it to run the compare method.
try {
if (cf.isApplicationClass(cf.loadApplicationClass(userClass)))
isIndexable = false;
} catch (ClassNotFoundException cnfe) {
// shouldn't happen, as we just checked that the class is orderable
isIndexable = false;
}
}
if (!isIndexable) {
throw StandardException.newException(SQLState.LANG_COLUMN_NOT_ORDERABLE_DURING_EXECUTION, typeId.getSQLTypeName());
}
// Remember the position in the base table of each column
baseColumnPositions[i] = columnDescriptor.getPosition();
if (maxBaseColumnPosition < baseColumnPositions[i])
maxBaseColumnPosition = baseColumnPositions[i];
}
/* The code below tries to determine if the index that we're about
* to create can "share" a conglomerate with an existing index.
* If so, we will use a single physical conglomerate--namely, the
* one that already exists--to support both indexes. I.e. we will
* *not* create a new conglomerate as part of this constant action.
*
* Deferrable constraints are backed by indexes that are *not* shared
* since they use physically non-unique indexes and as such are
* different from indexes used to represent non-deferrable
* constraints.
*/
// check if we have similar indices already for this table
ConglomerateDescriptor[] congDescs = td.getConglomerateDescriptors();
boolean shareExisting = false;
for (int i = 0; i < congDescs.length; i++) {
ConglomerateDescriptor cd = congDescs[i];
if (!cd.isIndex())
continue;
if (droppedConglomNum == cd.getConglomerateNumber()) {
/* We can't share with any conglomerate descriptor
* whose conglomerate number matches the dropped
* conglomerate number, because that descriptor's
* backing conglomerate was dropped, as well. If
* we're going to share, we have to share with a
* descriptor whose backing physical conglomerate
* is still around.
*/
continue;
}
IndexRowGenerator irg = cd.getIndexDescriptor();
int[] bcps = irg.baseColumnPositions();
boolean[] ia = irg.isAscending();
int j = 0;
/* The conditions which allow an index to share an existing
* conglomerate are as follows:
*
* 1. the set of columns (both key and include columns) and their
* order in the index is the same as that of an existing index AND
*
* 2. the ordering attributes are the same AND
*
* 3. one of the following is true:
* a) the existing index is unique, OR
* b) the existing index is non-unique with uniqueWhenNotNulls
* set to TRUE and the index being created is non-unique, OR
* c) both the existing index and the one being created are
* non-unique and have uniqueWithDuplicateNulls set to FALSE.
*
* 4. hasDeferrableChecking is FALSE.
*/
boolean possibleShare = (irg.isUnique() || !unique) && (bcps.length == baseColumnPositions.length) && !hasDeferrableChecking;
// Check whether the existing index is non-unique and uniqueWithDuplicateNulls
// is set to true (backing index for a unique constraint).
if (possibleShare && !irg.isUnique()) {
/* If the existing index has uniqueWithDuplicateNulls set to
* TRUE it can be shared by other non-unique indexes; otherwise
* the existing non-unique index has uniqueWithDuplicateNulls
* set to FALSE, which means the new non-unique conglomerate
* can only share if it has uniqueWithDuplicateNulls set to
* FALSE, as well.
*/
possibleShare = (irg.isUniqueWithDuplicateNulls() || !uniqueWithDuplicateNulls);
}
if (possibleShare && indexType.equals(irg.indexType())) {
for (; j < bcps.length; j++) {
if ((bcps[j] != baseColumnPositions[j]) || (ia[j] != isAscending[j]))
break;
}
}
if (j == baseColumnPositions.length) { // share
/*
* Don't allow users to create a duplicate index. Allow if being done internally
* for a constraint
*/
if (!isConstraint) {
activation.addWarning(StandardException.newWarning(SQLState.LANG_INDEX_DUPLICATE, indexName, cd.getConglomerateName()));
return;
}
/* Sharing indexes share the physical conglomerate
* underneath, so pull the conglomerate number from
* the existing conglomerate descriptor.
*/
conglomId = cd.getConglomerateNumber();
/* We create a new IndexRowGenerator because certain
* attributes--esp. uniqueness--may be different between
* the index we're creating and the conglomerate that
* already exists. I.e. even though we're sharing a
* conglomerate, the new index is not necessarily
* identical to the existing conglomerate. We have to
* keep track of that info so that if we later drop
* the shared physical conglomerate, we can figure out
* what this index (the one we're creating now) is
* really supposed to look like.
*/
indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls,
    false, // uniqueDeferrable
    false, // deferrable indexes are not shared
    baseColumnPositions, isAscending, baseColumnPositions.length);
// DERBY-655 and DERBY-1343
// Sharing indexes will have unique logical conglomerate UUIDs.
conglomerateUUID = dd.getUUIDFactory().createUUID();
shareExisting = true;
break;
}
}
/* If we have a droppedConglomNum then the index we're about to
* "create" already exists--i.e. it has an index descriptor and
* the corresponding information is already in the system catalogs.
* The only thing we're missing, then, is the physical conglomerate
* to back the index (because the old conglomerate was dropped).
*/
boolean alreadyHaveConglomDescriptor = (droppedConglomNum > -1L);
/* If this index already has an essentially same one, we share the
* conglomerate with the old one, and just simply add a descriptor
* entry into SYSCONGLOMERATES--unless we already have a descriptor,
* in which case we don't even need to do that.
*/
DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
if (shareExisting && !alreadyHaveConglomDescriptor) {
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
// can't just return yet, need to get member "indexTemplateRow"
// because create constraint may use it
}
// Describe the properties of the index to the store using Properties
// RESOLVE: The following properties assume a BTREE index.
Properties indexProperties;
if (properties != null) {
indexProperties = properties;
} else {
indexProperties = new Properties();
}
// Tell it the conglomerate id of the base table
indexProperties.put("baseConglomerateId", Long.toString(td.getHeapConglomerateId()));
if (uniqueWithDuplicateNulls && !hasDeferrableChecking) {
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
indexProperties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
} else {
// Lower data dictionary versions do not support 'unique with
// duplicate nulls' indexes; create a unique index instead.
if (uniqueWithDuplicateNulls) {
unique = true;
}
}
}
// All indexes are unique because they contain the RowLocation.
// The number of uniqueness columns must include the RowLocation
// if the user did not specify a unique index.
indexProperties.put("nUniqueColumns", Integer.toString(unique ? baseColumnPositions.length : baseColumnPositions.length + 1));
// By convention, the row location column is the last column
indexProperties.put("rowLocationColumn", Integer.toString(baseColumnPositions.length));
// For now, all columns are key fields, including the RowLocation
indexProperties.put("nKeyFields", Integer.toString(baseColumnPositions.length + 1));
// For now, assume that all index columns are ordered columns
if (!shareExisting) {
if (dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_4, null)) {
indexRowGenerator = new IndexRowGenerator(indexType, unique, uniqueWithDuplicateNulls, uniqueDeferrable, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), baseColumnPositions, isAscending, baseColumnPositions.length);
} else {
indexRowGenerator = new IndexRowGenerator(indexType, unique, false, false, false, baseColumnPositions, isAscending, baseColumnPositions.length);
}
}
/* Now add the rows from the base table to the conglomerate.
* We do this by scanning the base table and inserting the
* rows into a sorter before inserting from the sorter
* into the index. This gives us better performance
* and a more compact index.
*/
rowSource = null;
sortId = 0;
// set to true once the sorter is created
boolean needToDropSort = false;
/* bulkFetchSize will be 16 (for now) unless
* we are creating the table in which case it
* will be 1. Too hard to remove scan when
* creating index on new table, so minimize
* work where we can.
*/
int bulkFetchSize = (forCreateTable) ? 1 : 16;
int numColumns = td.getNumberOfColumns();
int approximateRowSize = 0;
// Create the FormatableBitSet for mapping the partial to full base row
FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
for (int index = 0; index < baseColumnPositions.length; index++) {
bitSet.set(baseColumnPositions[index]);
}
FormatableBitSet zeroBasedBitSet = RowUtil.shift(bitSet, 1);
// Start by opening a full scan on the base table.
scan = tc.openGroupFetchScan(td.getHeapConglomerateId(),
    false,                        // hold
    0,                            // open base table read only
    TransactionController.MODE_TABLE,
    TransactionController.ISOLATION_SERIALIZABLE,
    zeroBasedBitSet,              // all fields as objects
    (DataValueDescriptor[]) null, // startKeyValue
    0,                            // not used when giving null start posn.
    null,                         // qualifier
    (DataValueDescriptor[]) null, // stopKeyValue
    0);                           // not used when giving null stop posn.
// Create an array to put base row template
baseRows = new ExecRow[bulkFetchSize];
indexRows = new ExecIndexRow[bulkFetchSize];
compactBaseRows = new ExecRow[bulkFetchSize];
try {
// Create the array of base row template
for (int i = 0; i < bulkFetchSize; i++) {
// create a base row template
baseRows[i] = activation.getExecutionFactory().getValueRow(maxBaseColumnPosition);
// create an index row template
indexRows[i] = indexRowGenerator.getIndexRowTemplate();
// create a compact base row template
compactBaseRows[i] = activation.getExecutionFactory().getValueRow(baseColumnPositions.length);
}
indexTemplateRow = indexRows[0];
// Fill the partial row with nulls of the correct type
ColumnDescriptorList cdl = td.getColumnDescriptorList();
int cdlSize = cdl.size();
for (int index = 0, numSet = 0; index < cdlSize; index++) {
if (!zeroBasedBitSet.get(index)) {
continue;
}
numSet++;
ColumnDescriptor cd = cdl.elementAt(index);
DataTypeDescriptor dts = cd.getType();
for (int i = 0; i < bulkFetchSize; i++) {
// Put the column in both the compact and sparse base rows
baseRows[i].setColumn(index + 1, dts.getNull());
compactBaseRows[i].setColumn(numSet, baseRows[i].getColumn(index + 1));
}
// Calculate the approximate row size for the index row
approximateRowSize += dts.getTypeId().getApproximateLengthInBytes(dts);
}
// Get an array of RowLocation template
RowLocation[] rl = new RowLocation[bulkFetchSize];
for (int i = 0; i < bulkFetchSize; i++) {
rl[i] = scan.newRowLocationTemplate();
// Get an index row based on the base row
indexRowGenerator.getIndexRow(compactBaseRows[i], rl[i], indexRows[i], bitSet);
}
/* Now that we have indexTemplateRow, we are done in the sharing-index case. */
if (shareExisting)
return;
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate key.
* We create a basic sort observer for non-unique indexes
* so that we can reuse the wrappers during an external
* sort.
*/
int numColumnOrderings;
SortObserver sortObserver;
Properties sortProperties = null;
if (unique || uniqueWithDuplicateNulls || uniqueDeferrable) {
// if the index is a constraint, use constraintname in
// possible error message
String indexOrConstraintName = indexName;
if (conglomerateUUID != null) {
ConglomerateDescriptor cd = dd.getConglomerateDescriptor(conglomerateUUID);
if ((isConstraint) && (cd != null && cd.getUUID() != null && td != null)) {
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
}
}
if (unique || uniqueDeferrable) {
numColumnOrderings = unique ? baseColumnPositions.length : baseColumnPositions.length + 1;
sortObserver = new UniqueIndexSortObserver(lcc, constraintID, true, uniqueDeferrable, initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
} else {
// unique with duplicate nulls allowed.
numColumnOrderings = baseColumnPositions.length + 1;
// tell transaction controller to use the unique with
// duplicate nulls sorter, when making createSort() call.
sortProperties = new Properties();
sortProperties.put(AccessFactoryGlobals.IMPL_TYPE, AccessFactoryGlobals.SORT_UNIQUEWITHDUPLICATENULLS_EXTERNAL);
// use sort operator which treats nulls unequal
sortObserver = new UniqueWithDuplicateNullsIndexSortObserver(lcc, constraintID, true, (hasDeferrableChecking && constraintType != DataDictionary.FOREIGNKEY_CONSTRAINT), initiallyDeferred, indexOrConstraintName, indexTemplateRow, true, td.getName());
}
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(true, false, indexTemplateRow, true);
}
ColumnOrdering[] order = new ColumnOrdering[numColumnOrderings];
for (int i = 0; i < numColumnOrderings; i++) {
order[i] = new IndexColumnOrder(i, unique || i < numColumnOrderings - 1 ? isAscending[i] : true);
}
// create the sorter
sortId = tc.createSort(sortProperties, indexTemplateRow.getRowArrayClone(), order, sortObserver,
    false,                       // not in order
    scan.getEstimatedRowCount(),
    approximateRowSize);         // est row size, -1 means no idea
needToDropSort = true;
// Populate sorter and get the output of the sorter into a row
// source. The sorter has the indexed columns only and the columns
// are in the correct order.
rowSource = loadSorter(baseRows, indexRows, tc, scan, sortId, rl);
conglomId = tc.createAndLoadConglomerate(indexType,
    indexTemplateRow.getRowArray(),   // index row template
    order,                            // columns sort order
    indexRowGenerator.getColumnCollationIds(td.getColumnDescriptorList()),
    indexProperties,
    TransactionController.IS_DEFAULT, // not temporary
    rowSource, (long[]) null);
} finally {
/* close the table scan */
if (scan != null)
scan.close();
/* close the sorter row source before throwing exception */
if (rowSource != null)
rowSource.closeRowSource();
/*
** drop the sort so that intermediate external sort run can be
** removed from disk
*/
if (needToDropSort)
tc.dropSort(sortId);
}
ConglomerateController indexController = tc.openConglomerate(conglomId, false, 0, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
// Check to make sure that the conglomerate can be used as an index
if (!indexController.isKeyed()) {
indexController.close();
throw StandardException.newException(SQLState.LANG_NON_KEYED_INDEX, indexName, indexType);
}
indexController.close();
// Create a conglomerate descriptor for the new index unless one already exists.
if (!alreadyHaveConglomDescriptor) {
ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID());
dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
// add newly added conglomerate to the list of conglomerate
// descriptors in the td.
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();
cdl.add(cgd);
/* Since we created a new conglomerate descriptor, load
* its UUID into the corresponding field, to ensure that
* it is properly set in the StatisticsDescriptor created
* below.
*/
conglomerateUUID = cgd.getUUID();
}
CardinalityCounter cCount = (CardinalityCounter) rowSource;
long numRows = cCount.getRowCount();
if (addStatistics(dd, indexRowGenerator, numRows)) {
long[] c = cCount.getCardinality();
for (int i = 0; i < c.length; i++) {
StatisticsDescriptor statDesc = new StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(), conglomerateUUID, td.getUUID(), "I", new StatisticsImpl(numRows, c[i]), i + 1);
dd.addDescriptor(statDesc, null, DataDictionary.SYSSTATISTICS_CATALOG_NUM, true, tc);
}
}
}
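For orientation, executeConstantAction() above is what ultimately runs when a CREATE INDEX statement (or a constraint's backing index) executes. A minimal JDBC sketch that would drive it (connection URL, table, and index names are placeholder assumptions):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateIndexExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:myDB");
             Statement s = conn.createStatement()) {
            // Compiled into a CreateIndexConstantAction: the base table is
            // scanned, the keys are sorted, and the sorter output is bulk
            // loaded into the new index conglomerate.
            s.executeUpdate("CREATE INDEX emp_name_idx ON emp(last_name, first_name)");
            // Re-issuing an equivalent index only raises a duplicate-index
            // warning, per the conglomerate-sharing logic above.
            s.executeUpdate("CREATE INDEX emp_name_idx2 ON emp(last_name, first_name)");
        }
    }
}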
use of org.apache.derby.iapi.store.access.ConglomerateController in project derby by apache.
the class T_QualifierTest method t_testqual.
/* public methods of T_QualifierTest */
public boolean t_testqual(TransactionController tc) throws StandardException, T_Fail {
boolean ret_val = true;
DataValueDescriptor[] openscan_template = null;
DataValueDescriptor[] fetch_template = null;
DataValueDescriptor[] base_row = null;
T_SecondaryIndexRow index_row = null;
long value = -1;
long[] col1 = { 1, 3, 4, 4, 4, 5, 5, 5, 6, 7, 9 };
long[] col2 = { 1, 1, 2, 4, 6, 2, 4, 6, 1, 1, 1 };
long[] col3 = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 };
long conglomid;
long base_conglomid;
long index_conglomid;
ConglomerateController base_cc = null;
ConglomerateController index_cc = null;
RowLocation base_rowloc = null;
base_row = TemplateRow.newU8Row(3);
if (init_conglomerate_type.compareTo("BTREE") == 0) {
base_conglomid = tc.createConglomerate("heap", base_row, null, null, null, TransactionController.IS_DEFAULT);
index_row = new T_SecondaryIndexRow();
base_cc = tc.openConglomerate(base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
base_rowloc = base_cc.newRowLocationTemplate();
index_row.init(base_row, base_rowloc, 4);
index_conglomid = tc.createConglomerate(init_conglomerate_type, index_row.getRow(), null, null, init_properties, init_temporary ? TransactionController.IS_TEMPORARY : TransactionController.IS_DEFAULT);
index_cc = tc.openConglomerate(index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
conglomid = index_conglomid;
openscan_template = index_row.getRow();
// make another template
T_SecondaryIndexRow fetch_index_row = new T_SecondaryIndexRow();
fetch_index_row.init(TemplateRow.newU8Row(3), base_cc.newRowLocationTemplate(), 4);
fetch_template = fetch_index_row.getRow();
} else {
base_conglomid = tc.createConglomerate(init_conglomerate_type, base_row,
    null, // default order
    null, // default collation
    init_properties, init_temporary ? TransactionController.IS_TEMPORARY : TransactionController.IS_DEFAULT);
base_cc = tc.openConglomerate(base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
base_rowloc = base_cc.newRowLocationTemplate();
conglomid = base_conglomid;
openscan_template = base_row;
fetch_template = TemplateRow.newU8Row(3);
}
// insert them in reverse order just to make sure btree is sorting them
for (int i = col1.length - 1; i >= 0; i--) {
((SQLLongint) (base_row[0])).setValue(col1[i]);
((SQLLongint) (base_row[1])).setValue(col2[i]);
((SQLLongint) (base_row[2])).setValue(col3[i]);
base_cc.insertAndFetchLocation(base_row, base_rowloc);
if (init_conglomerate_type.compareTo("BTREE") == 0) {
index_cc.insert(index_row.getRow());
}
}
tc.commit();
// Run through the predicates as described in the openScan() interface,
// and implement them in qualifiers rather than start and stop.
//
// Use the following SQLLongints for qualifier values.
SQLLongint qual_col1 = new SQLLongint(-1);
SQLLongint qual_col2 = new SQLLongint(-1);
SQLLongint qual_col3 = new SQLLongint(-1);
SQLLongint qual_col4 = new SQLLongint(-1);
SQLLongint qual_col5 = new SQLLongint(-1);
SQLLongint qual_col6 = new SQLLongint(-1);
SQLLongint qual_col7 = new SQLLongint(-1);
// test predicate x = 5
//
// result set should be: {5,2,16}, {5,4,17}, {5,6,18}
//
progress("qual scan (x = 5)");
qual_col1.setValue(5);
Qualifier[][] q1 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_EQUALS, false, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q1, null, ScanController.NA, 3, 16, init_order)) {
ret_val = false;
}
// +---------------------------------------------------------+
// |pred |start|key|stop |key|rows returned |rows locked |
// | |value|op |value|op | |(serialization)|
// +------+-----+---+-----+---+--------------+---------------+
// |x > 5 |{5} |GT |null | |{6,1} .. {9,1}|{5,6} .. {9,1} |
// +-----------------------------------------+---------------+
progress("qual scan (x > 5)");
qual_col1.setValue(5);
Qualifier[][] q2 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_LESSOREQUALS, true, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q2, null, ScanController.NA, 3, 19, init_order)) {
ret_val = false;
}
// +---------------------------------------------------------+
// |pred |start|key|stop |key|rows returned |rows locked |
// | |value|op |value|op | |(serialization)|
// +------+-----+---+-----+---+--------------+---------------+
// |x >= 5|{5} |GE |null | |{5,2} .. {9,1}|{4,6} .. {9,1} |
// +-----------------------------------------+---------------+
progress("qual scan (x >= 5)");
qual_col1.setValue(5);
Qualifier[][] q3 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_LESSTHAN, true, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q3, null, ScanController.NA, 6, 16, init_order)) {
ret_val = false;
}
//
// +---------------------------------------------------------+
// |pred |start|key|stop |key|rows returned |rows locked |
// | |value|op |value|op | |(serialization)|
// +------+-----+---+-----+---+--------------+---------------+
// |x <= 5|null | |{5} |GT |{1,1} .. {5,6}|first .. {5,6} |
// +-----------------------------------------+---------------+
progress("qual scan (x <= 5)");
qual_col1.setValue(5);
Qualifier[][] q4 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_LESSOREQUALS, false, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q4, null, ScanController.NA, 8, 11, init_order)) {
ret_val = false;
}
//
// +---------------------------------------------------------+
// |pred |start|key|stop |key|rows returned |rows locked |
// | |value|op |value|op | |(serialization)|
// +------+-----+---+-----+---+--------------+---------------+
// |x < 5 |null | |{5} |GE |{1,1} .. {4,6}|first .. {4,6} |
// +-----------------------------------------+---------------+
progress("qual scan (x < 5)");
qual_col1.setValue(5);
Qualifier[][] q5 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_LESSTHAN, false, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q5, null, ScanController.NA, 5, 11, init_order)) {
ret_val = false;
}
// +------------------------------------------------------------------+
// |pred |start|key|stop |key|rows returned|rows locked |
// | |value|op |value|op | |(serialized) |
// +-----------------+------+--+-----+--+--------------+--------------+
// |x >= 5 and x <= 7|{5}, |GE|{7} |GT|{5,2} .. {7,1}|{4,6} .. {7,1}|
// +------------------------------------------------------------------+
progress("qual scan (x >= 5 and x <= 7)");
qual_col1.setValue(5);
qual_col2.setValue(7);
Qualifier[][] q6 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_LESSTHAN, true, true, true), new QualifierUtil(0, qual_col2, Orderable.ORDER_OP_LESSOREQUALS, false, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q6, null, ScanController.NA, 5, 16, init_order)) {
ret_val = false;
}
// passing qualifier in q6[0][0], q6[0][1] should evaluate same as
// passing in q6[0][0], q6[1][0]
// +------------------------------------------------------------------+
// |pred |start|key|stop |key|rows returned|rows locked |
// | |value|op |value|op | |(serialized) |
// +-----------------+------+--+-----+--+--------------+--------------+
// |x >= 5 and x <= 7|{5}, |GE|{7} |GT|{5,2} .. {7,1}|{4,6} .. {7,1}|
// +------------------------------------------------------------------+
progress("qual scan (x >= 5 and x <= 7)");
qual_col1.setValue(5);
qual_col2.setValue(7);
Qualifier[][] q6_2 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_LESSTHAN, true, true, true) }, { new QualifierUtil(0, qual_col2, Orderable.ORDER_OP_LESSOREQUALS, false, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q6_2, null, ScanController.NA, 5, 16, init_order)) {
ret_val = false;
}
// +------------------------------------------------------------------+
// |pred |start|key|stop |key|rows returned|rows locked |
// | |value|op |value|op | |(serialized) |
// +-----------------+------+--+-----+--+--------------+--------------+
// |x = 5 and y > 2 |{5,2} |GT|{5} |GT|{5,4} .. {5,6}|{5,2} .. {9,1}|
// +------------------------------------------------------------------+
progress("qual scan (x = 5 and y > 2)");
qual_col1.setValue(5);
qual_col2.setValue(2);
Qualifier[][] q7 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_EQUALS, false, true, true), new QualifierUtil(1, qual_col2, Orderable.ORDER_OP_LESSOREQUALS, true, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q7, null, ScanController.NA, 2, 17, init_order)) {
ret_val = false;
}
// +------------------------------------------------------------------+
// |pred |start|key|stop |key|rows returned|rows locked |
// | |value|op |value|op | |(serialized) |
// +-----------------+------+--+-----+--+--------------+--------------+
// |x = 5 and y >= 2 | {5,2}|GE| {5} |GT|{5,2} .. {5,6}|{4,6} .. {9,1}|
// +------------------------------------------------------------------+
progress("qual scan (x = 5 and y >= 2)");
qual_col1.setValue(5);
qual_col2.setValue(2);
Qualifier[][] q8 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_EQUALS, false, true, true), new QualifierUtil(1, qual_col2, Orderable.ORDER_OP_LESSTHAN, true, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q8, null, ScanController.NA, 3, 16, init_order)) {
ret_val = false;
}
// +------------------------------------------------------------------+
// |pred |start|key|stop |key|rows returned|rows locked |
// | |value|op |value|op | |(serialized) |
// +-----------------+------+--+-----+--+--------------+--------------+
// |x = 5 and y < 5 | {5} |GE|{5,5}|GE|{5,2} .. {5,4}|{4,6} .. {5,4}|
// +------------------------------------------------------------------+
progress("qual scan (x = 5 and y < 5)");
qual_col1.setValue(5);
qual_col2.setValue(5);
Qualifier[][] q9 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_EQUALS, false, true, true), new QualifierUtil(1, qual_col2, Orderable.ORDER_OP_LESSTHAN, false, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q9, null, ScanController.NA, 2, 16, init_order)) {
ret_val = false;
}
// +------------------------------------------------------------------+
// |pred |start|key|stop |key|rows returned|rows locked |
// | |value|op |value|op | |(serialized) |
// +-----------------+------+--+-----+--+--------------+--------------+
// |x = 2 | {2} |GE| {2} |GT|none |{1,1} .. {1,1}|
// +------------------------------------------------------------------+
progress("qual scan (x = 2)");
qual_col1.setValue(2);
Qualifier[][] q10 = { { new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_EQUALS, false, true, true) } };
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q10, null, ScanController.NA, 0, 0, init_order)) {
ret_val = false;
}
// +------------------------------------------------------------------+
// |pred |start|key|stop |key|rows returned |rows locked |
// | |value|op |value|op | |(serialized) |
// +----------------+-----+---+-----+-- +--------------+--------------+
// |x >= 5 or y = 6 | null| | null| |{4,6} .. {9,1}|{1,1} .. {9,1}|
// +------------------------------------------------------------------+
progress("qual scan (x >= 5) or (y = 6)");
qual_col1.setValue(5);
qual_col2.setValue(6);
Qualifier[][] q11 = new Qualifier[2][];
q11[0] = new Qualifier[0];
q11[1] = new Qualifier[2];
q11[1][0] = new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_GREATEROREQUALS, false, true, true);
q11[1][1] = new QualifierUtil(1, qual_col2, Orderable.ORDER_OP_EQUALS, false, true, true);
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q11, null, ScanController.NA, 7, 15, init_order)) {
ret_val = false;
}
// +------------------------------------------------------------------+
// |pred |start|key|stop |key|rows returned |rows locked |
// | |value|op |value|op | |(serialized) |
// +----------------+-----+---+-----+-- +--------------+--------------+
// |(x = 1 or y = 1 or y = 6)|
// | and |
// |(x > 5 or y = 1)|
// | and |
// |(x = 9 or x = 7)|null | | null| |{7,1} .. {9,1}|{1,1} .. {9,1}|
// +------------------------------------------------------------------+
progress("qual scan (x = 1 or y = 1 or y = 6) and (x > 5 or y = 1) and (x = 9 or x = 7)");
qual_col1.setValue(1);
qual_col2.setValue(1);
qual_col3.setValue(6);
qual_col4.setValue(5);
qual_col5.setValue(1);
qual_col6.setValue(9);
qual_col7.setValue(7);
Qualifier[][] q12 = new Qualifier[4][];
q12[0] = new Qualifier[0];
q12[1] = new Qualifier[3];
q12[2] = new Qualifier[2];
q12[3] = new Qualifier[2];
q12[1][0] = new QualifierUtil(0, qual_col1, Orderable.ORDER_OP_EQUALS, false, true, true);
q12[1][1] = new QualifierUtil(1, qual_col2, Orderable.ORDER_OP_EQUALS, false, true, true);
q12[1][2] = new QualifierUtil(1, qual_col3, Orderable.ORDER_OP_EQUALS, false, true, true);
q12[2][0] = new QualifierUtil(0, qual_col4, Orderable.ORDER_OP_GREATERTHAN, false, true, true);
q12[2][1] = new QualifierUtil(1, qual_col5, Orderable.ORDER_OP_EQUALS, false, true, true);
q12[3][0] = new QualifierUtil(0, qual_col6, Orderable.ORDER_OP_EQUALS, false, true, true);
q12[3][1] = new QualifierUtil(0, qual_col7, Orderable.ORDER_OP_EQUALS, false, true, true);
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q12, null, ScanController.NA, 2, 20, init_order)) {
ret_val = false;
}
// +------------------------------------------------------------------+
// |pred |start|key|stop |key|rows returned |rows locked |
// | |value|op |value|op | |(serialized) |
// +----------------+-----+---+-----+-- +--------------+--------------+
// |(y = 4 or y = 1)|
// | and |
// |(x = 1 or x = 4 or x= 9)|
// | and |
// |(z = 15 or z = 14)|null | | null| |{4,4} .. {4,4}| ALL |
// +------------------------------------------------------------------+
progress("qual scan (x = 1 or x = 4 or x= 9) and (y = 4 or y = 1) and (z = 15 or z = 14)");
qual_col1.setValue(4);
qual_col2.setValue(1);
qual_col3.setValue(1);
qual_col4.setValue(4);
qual_col5.setValue(9);
qual_col6.setValue(15);
qual_col7.setValue(14);
Qualifier[][] q13 = new Qualifier[4][];
q13[0] = new Qualifier[0];
q13[1] = new Qualifier[2];
q13[2] = new Qualifier[3];
q13[3] = new Qualifier[2];
q13[1][0] = new QualifierUtil(1, qual_col1, Orderable.ORDER_OP_EQUALS, false, true, true);
q13[1][1] = new QualifierUtil(1, qual_col2, Orderable.ORDER_OP_EQUALS, false, true, true);
q13[2][0] = new QualifierUtil(0, qual_col4, Orderable.ORDER_OP_EQUALS, false, true, true);
q13[2][1] = new QualifierUtil(0, qual_col5, Orderable.ORDER_OP_EQUALS, false, true, true);
q13[2][2] = new QualifierUtil(0, qual_col3, Orderable.ORDER_OP_EQUALS, false, true, true);
q13[3][0] = new QualifierUtil(2, qual_col6, Orderable.ORDER_OP_EQUALS, false, true, true);
q13[3][1] = new QualifierUtil(2, qual_col7, Orderable.ORDER_OP_EQUALS, false, true, true);
if (!t_scan(tc, conglomid, openscan_template, fetch_template, null, ScanController.NA, q13, null, ScanController.NA, 1, 14, init_order)) {
ret_val = false;
}
tc.commit();
progress("Ending t_testqual");
return (ret_val);
}
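The two-dimensional Qualifier arrays built above follow the contract described in the openScan() interface: every entry in qual[0] is AND'd, while for each row qual[i] with i >= 1 the entries within the row are OR'd together and the rows themselves are AND'd. A hedged sketch of that evaluation (method and helper names are illustrative, not Derby's actual implementation):
// Illustrative only: mirrors the documented Qualifier[][] evaluation
// contract, not Derby's actual code.
static boolean qualifyRow(DataValueDescriptor[] row, Qualifier[][] qual)
        throws StandardException {
    // qual[0]: every entry must be true (AND semantics).
    for (Qualifier q : qual[0]) {
        if (!qualifierIsTrue(row, q))
            return false;
    }
    // qual[1..n]: at least one entry per row must be true
    // (OR within a row, AND across rows).
    for (int i = 1; i < qual.length; i++) {
        boolean orTrue = false;
        for (Qualifier q : qual[i]) {
            if (qualifierIsTrue(row, q)) {
                orTrue = true;
                break;
            }
        }
        if (!orTrue)
            return false;
    }
    return true;
}

// Hypothetical helper: applies one qualifier, honoring negateCompareResult,
// which the tests above use to express > and >= via the LESS* operators.
static boolean qualifierIsTrue(DataValueDescriptor[] row, Qualifier q)
        throws StandardException {
    boolean r = row[q.getColumnId()].compare(
            q.getOperator(), q.getOrderable(), q.getOrderedNulls(), q.getUnknownRV());
    return q.negateCompareResult() ? !r : r;
}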
use of org.apache.derby.iapi.store.access.ConglomerateController in project derby by apache.
the class T_CreateConglomRet method t_012.
/**
* Test Special cases of split.
* <p>
* Testing: restartSplitFor() call in BranchControlRow().
*
* The second case is the same as the first except the calling code is
* trying to split a branch page and the parent branch page doesn't have
* room for the row.
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Throws T_Fail on any test failure.
*/
protected boolean t_012(TransactionController tc) throws StandardException, T_Fail {
boolean ret_val = true;
REPORT("Starting t_011");
T_CreateConglomRet create_ret = new T_CreateConglomRet();
createCongloms(tc, 2, false, true, 0, create_ret);
// Open the base conglomerate.
ConglomerateController base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Open the index conglomerate.
ConglomerateController index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Create a row.
T_SecondaryIndexRow index_row = new T_SecondaryIndexRow();
RowLocation rowloc = base_cc.newRowLocationTemplate();
DataValueDescriptor[] base_row = TemplateRow.newU8Row(2);
base_row[0] = new SQLChar("aaaaaaaaaa");
index_row.init(base_row, rowloc, 3);
((SQLChar) base_row[0]).setValue(T_b2i.repeatString("a", 1000));
((SQLLongint) base_row[1]).setValue(1);
base_cc.insertAndFetchLocation(base_row, rowloc);
// CAUSE BRANCH splitFor to loop:
// pick numbers so that split will happen in middle of page. Do this
// by first inserting last row in table and then insert smaller rows,
// then insert rows before it until the table is just ready to split
// the root, and finally insert some shorter rows in such a way as
// they cause a split but the split point is chosen with one of the
// larger rows as the discriminator, causing the 1st splitFor pass to fail
// and loop back and do a splitFor on the larger row.
// insert enough rows so the tree is 3 levels, just ready to go to
// 4 levels.
((SQLChar) base_row[0]).setValue(T_b2i.repeatString("ma", 500));
for (int i = 0; i < 3; i++) {
((SQLLongint) base_row[1]).setValue(i);
base_cc.insertAndFetchLocation(base_row, rowloc);
if (index_cc.insert(index_row.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
((SQLChar) base_row[0]).setValue(T_b2i.repeatString("m", 1000));
for (int i = 3; i < 23; i++) {
((SQLLongint) base_row[1]).setValue(i);
base_cc.insertAndFetchLocation(base_row, rowloc);
if (index_cc.insert(index_row.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
((SQLChar) base_row[0]).setValue(T_b2i.repeatString("a", 600));
for (int i = 123; i > 111; i--) {
((SQLLongint) base_row[1]).setValue(i * 2);
base_cc.insertAndFetchLocation(base_row, rowloc);
if (index_cc.insert(index_row.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
{
((SQLLongint) base_row[1]).setValue(227);
base_cc.insertAndFetchLocation(base_row, rowloc);
if (index_cc.insert(index_row.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
// ((B2IController)index_cc).printTree();
tc.commit();
// Close the conglomerate.
index_cc.close();
REPORT("Ending t_012");
return (ret_val);
}
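T_b2i.repeatString() above simply manufactures long keys so that only a few rows fit on each page, forcing the splits under test. A minimal sketch of such a helper (an assumption; the real T_b2i implementation may differ):
// Sketch of a repeatString-style helper; the actual T_b2i version may differ.
static String repeatString(String data, int repeat) {
    StringBuilder sb = new StringBuilder(data.length() * repeat);
    for (int i = 0; i < repeat; i++) {
        sb.append(data);
    }
    return sb.toString();
}
// Usage: repeatString("a", 1000) yields a 1000-character key that fills most
// of a page, steering where the btree must choose its split point.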
use of org.apache.derby.iapi.store.access.ConglomerateController in project derby by apache.
the class T_CreateConglomRet method t_005.
/**
* Test Branch splits - number of rows necessary to cause splits is raw
* store implementation dependent (currently 5 rows per page in the in-memory
* implementation).
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Throws T_Fail on any test failure.
*/
protected boolean t_005(TransactionController tc) throws StandardException, T_Fail {
boolean ret_val = true;
REPORT("Starting t_005");
T_CreateConglomRet create_ret = new T_CreateConglomRet();
createCongloms(tc, 2, false, false, 0, create_ret);
// Open the base conglomerate.
ConglomerateController base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Open the index conglomerate.
ConglomerateController index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Create a row.
T_SecondaryIndexRow index_row = new T_SecondaryIndexRow();
RowLocation rowloc = base_cc.newRowLocationTemplate();
DataValueDescriptor[] base_row = TemplateRow.newU8Row(2);
index_row.init(base_row, rowloc, 3);
// insert them in reverse order just to make sure btree is sorting them
for (int i = 200; i >= 0; i -= 4) {
((SQLLongint) base_row[0]).setValue(1);
((SQLLongint) base_row[1]).setValue(i);
base_cc.insertAndFetchLocation(base_row, rowloc);
if (index_cc.insert(index_row.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
for (int i = 199; i >= 0; i -= 4) {
((SQLLongint) base_row[0]).setValue(1);
((SQLLongint) base_row[1]).setValue(i);
base_cc.insertAndFetchLocation(base_row, rowloc);
if (index_cc.insert(index_row.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
index_cc.checkConsistency();
// Close the conglomerate.
index_cc.close();
tc.commit();
// Search for each of the keys and delete them one at a time.
DataValueDescriptor[] delete_key = TemplateRow.newU8Row(2);
for (int i = 200; i >= 0; i -= 4) {
((SQLLongint) delete_key[0]).setValue(1);
((SQLLongint) delete_key[1]).setValue(i);
if (!t_delete(tc, create_ret.index_conglomid, delete_key, false)) {
ret_val = false;
}
}
for (int i = 199; i >= 0; i -= 4) {
((SQLLongint) delete_key[0]).setValue(1);
((SQLLongint) delete_key[1]).setValue(i);
if (!t_delete(tc, create_ret.index_conglomid, delete_key, false)) {
ret_val = false;
}
}
tc.commit();
// Open the base conglomerate.
base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Open the conglomerate.
index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// flush and empty cache to make sure rereading stuff works.
RawStoreFactory rawstore = (RawStoreFactory) findServiceModule(this.store_module, RawStoreFactory.MODULE);
rawstore.idle();
for (int i = 200; i >= 0; i -= 3) {
((SQLLongint) base_row[0]).setValue(1);
((SQLLongint) base_row[1]).setValue(i);
base_cc.insertAndFetchLocation(base_row, rowloc);
if (index_cc.insert(index_row.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
for (int i = 200; i >= 0; i -= 3) {
((SQLLongint) delete_key[0]).setValue(1);
((SQLLongint) delete_key[1]).setValue(i);
if (!t_delete(tc, create_ret.index_conglomid, delete_key, false)) {
ret_val = false;
}
}
// index check - there should be no records left.
ScanController empty_scan = tc.openScan(create_ret.index_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
if (empty_scan.next())
throw T_Fail.testFailMsg("t_005: there are still rows in table.");
index_cc.checkConsistency();
for (int i = 600; i >= 400; i -= 3) {
((SQLLongint) base_row[0]).setValue(1);
((SQLLongint) base_row[1]).setValue(i);
base_cc.insertAndFetchLocation(base_row, rowloc);
if (index_cc.insert(index_row.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
index_cc.checkConsistency();
tc.abort();
// index check - there should be no records left.
empty_scan = tc.openScan(create_ret.index_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
if (empty_scan.next())
throw T_Fail.testFailMsg("t_005: there are still rows in table.");
REPORT("Ending t_005");
return (ret_val);
}
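The t_delete() helper used above is not shown in this snippet. A hedged sketch of how such a delete-by-key probe could be written against the ScanController API (simplified relative to the real test harness):
// Sketch only: position an exact-match scan on the key and delete the row.
static boolean t_delete(TransactionController tc, long conglomid,
        DataValueDescriptor[] key) throws StandardException {
    ScanController scan = tc.openScan(conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE,
            (FormatableBitSet) null,  // fetch all columns
            key, ScanController.GE,   // start: first row >= key
            null,                     // no qualifier
            key, ScanController.GT);  // stop: before first row > key
    boolean deleted = scan.next() && scan.delete();
    scan.close();
    return deleted;
}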