use of org.apache.derby.iapi.store.access.BackingStoreHashtable in project derby by apache.
the class ReferencedKeyRIChecker method rememberKey.
/**
 * Remember the deletion of this key; it may cause a RESTRICT
 * foreign key violation, cf. logic in {@link #postCheck}.
 * @param rememberRow the row whose key columns identify the deleted key
 * @throws StandardException
 */
private void rememberKey(ExecRow rememberRow) throws StandardException {
if (deletedKeys == null) {
// key: all columns (these are index rows, or a row containing a
// row location)
identityMap = new int[numColumns];
for (int i = 0; i < numColumns; i++) {
identityMap[i] = i;
}
deletedKeys = new BackingStoreHashtable(
        tc,
        null,
        identityMap,
        true, // remove duplicates: no need for more copies:
              // one is enough to know what to look for on commit
        -1,
        HashScanResultSet.DEFAULT_MAX_CAPACITY,
        HashScanResultSet.DEFAULT_INITIAL_CAPACITY,
        HashScanResultSet.DEFAULT_MAX_CAPACITY,
        false,
        false);
}
DataValueDescriptor[] row = rememberRow.getRowArray();
for (int i = 0; i < numColumns; i++) {
refKey[i] = row[fkInfo.colArray[i] - 1];
}
Object hashKey = KeyHasher.buildHashKey(refKey, identityMap);
DataValueDescriptor[] savedRow = (DataValueDescriptor[]) deletedKeys.remove(hashKey);
if (savedRow == null) {
savedRow = new DataValueDescriptor[numColumns + 1];
System.arraycopy(refKey, 0, savedRow, 0, numColumns);
savedRow[numColumns] = new SQLLongint(1);
} else {
savedRow[numColumns] = new SQLLongint(((SQLLongint) savedRow[numColumns]).getLong() + 1);
}
deletedKeys.putRow(false, savedRow, null);
}
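The pattern above is worth calling out: duplicates are removed, so each deleted key is stored only once, and the extra trailing SQLLongint column counts how many times that key was deleted, by removing the saved row, bumping the counter, and putting the row back. A minimal, self-contained sketch of the same counting idea using a plain java.util.HashMap (purely illustrative; the class and method names here are invented, and the real code keeps the count in the stored row itself so that BackingStoreHashtable can spill it to disk):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DeletedKeyCounter {
    // key columns -> number of times that key was deleted
    private final Map<List<Object>, Long> deletedKeys = new HashMap<>();

    public void rememberKey(Object[] keyColumns) {
        // merge() plays the role of "remove, increment the trailing counter, put back"
        deletedKeys.merge(Arrays.asList(keyColumns), 1L, Long::sum);
    }

    public long deleteCount(Object[] keyColumns) {
        return deletedKeys.getOrDefault(Arrays.asList(keyColumns), 0L);
    }
}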
use of org.apache.derby.iapi.store.access.BackingStoreHashtable in project derby by apache.
the class ScrollInsensitiveResultSet method openCore.
//
// ResultSet interface (leftover from NoPutResultSet)
//
/**
* open a scan on the source. scan parameters are evaluated
* at each open, so there is probably some way of altering
* their values...
*
* @exception StandardException thrown on failure
*/
public void openCore() throws StandardException {
beginTime = getCurrentTimeMillis();
if (SanityManager.DEBUG)
SanityManager.ASSERT(!isOpen, "ScrollInsensitiveResultSet already open");
source.openCore();
isOpen = true;
numOpens++;
/* Create the hash table. We pass
* null in as the row source as we will
* build the hash table on demand as
* the user scrolls.
* The 1st column, the position in the
* scan, will be the key column.
*/
final int[] keyCols = new int[] { 0 };
/* We don't use the optimizer row count for this because it could be
* wildly pessimistic. We only use Hash tables when the optimizer row count
* is within certain bounds. We have no alternative for scrolling insensitive
* cursors so we'll just trust that it will fit.
* We need BackingStoreHashtable to actually go to disk when it doesn't fit.
* This is a known limitation.
*/
ht = new BackingStoreHashtable(
        getTransactionController(),
        null,
        keyCols,
        false,
        -1, // don't trust optimizer row count
        HashScanResultSet.DEFAULT_MAX_CAPACITY,
        HashScanResultSet.DEFAULT_INITIAL_CAPACITY,
        HashScanResultSet.DEFAULT_MAX_CAPACITY,
        false,
        keepAfterCommit);
// When re-using language result sets (DERBY-827) we need to
// reset some member variables to the value they would have
// had in a newly constructed object.
lastPosition = 0;
needsRepositioning = false;
numFromHashTable = 0;
numToHashTable = 0;
positionInSource = 0;
seenFirst = false;
seenLast = false;
maxRows = activation.getMaxRows();
openTime += getElapsedMillis(beginTime);
setBeforeFirstRow();
}
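What this sets up is a position-keyed cache: column 0 of every stored row is the row's position in the underlying scan, so scrolling backwards is a lookup by position, while scrolling forwards past the high-water mark pulls more rows from the source and adds them to the table. A rough, self-contained sketch of that access pattern (plain Java with invented names, not the Derby classes; the real implementation uses BackingStoreHashtable so the cache can overflow to disk and, with keepAfterCommit, survive a commit for holdable cursors):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class ScrollCache<R> {
    private final Iterator<R> source;            // the underlying forward-only scan
    private final Map<Long, R> byPosition = new HashMap<>();
    private long highWaterMark = 0;              // highest position fetched so far

    public ScrollCache(Iterator<R> source) {
        this.source = source;
    }

    /** Return the row at 1-based position pos, fetching from the source on demand. */
    public R getRowAt(long pos) {
        while (highWaterMark < pos && source.hasNext()) {
            byPosition.put(++highWaterMark, source.next());
        }
        return byPosition.get(pos);              // null once we run past the end
    }
}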
use of org.apache.derby.iapi.store.access.BackingStoreHashtable in project derby by apache.
the class HashTableResultSet method openCore.
//
// NoPutResultSet interface
//
/**
* open a scan on the table. scan parameters are evaluated
* at each open, so there is probably some way of altering
* their values...
*
* @exception StandardException thrown if cursor finished.
*/
public void openCore() throws StandardException {
TransactionController tc;
beginTime = getCurrentTimeMillis();
// - sometimes get NullPointerException in openCore().
if (SanityManager.DEBUG) {
SanityManager.ASSERT(source != null, "HTRS().openCore(), source expected to be non-null");
}
// is access to open controlled and ensured valid.
if (SanityManager.DEBUG)
SanityManager.ASSERT(!isOpen, "HashTableResultSet already open");
// Get the current transaction controller
tc = activation.getTransactionController();
if (!hashTableBuilt) {
source.openCore();
/* Create and populate the hash table. We pass
* ourself in as the row source. This allows us
* to apply the single table predicates to the
* rows coming from our child as we build the
* hash table.
*/
ht = new BackingStoreHashtable(
        tc,
        this,
        keyColumns,
        removeDuplicates,
        (int) optimizerEstimatedRowCount,
        maxInMemoryRowCount,
        (int) initialCapacity,
        loadFactor,
        skipNullKeyColumns,
        false);
if (runTimeStatsOn) {
hashtableSize = ht.size();
if (scanProperties == null) {
scanProperties = new Properties();
}
try {
if (ht != null) {
ht.getAllRuntimeStats(scanProperties);
}
} catch (StandardException se) {
// ignore
}
}
isOpen = true;
hashTableBuilt = true;
}
resetProbeVariables();
numOpens++;
openTime += getElapsedMillis(beginTime);
}
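Unlike the scroll-insensitive case, this result set passes itself as the row source, so BackingStoreHashtable fills itself in one pass over the child rows, and the single-table predicates are applied to each row as it streams in, before it is ever stored. A simplified sketch of that build-with-filter step, using plain Java stand-ins rather than the Derby interfaces (names are invented for illustration):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;

public final class HashJoinBuild {
    /** Build key -> matching rows, filtering each row as it arrives from the child. */
    public static <R, K> Map<K, List<R>> build(Iterator<R> childRows,
                                               Predicate<R> singleTablePredicate,
                                               Function<R, K> keyOf) {
        Map<K, List<R>> table = new HashMap<>();
        while (childRows.hasNext()) {
            R row = childRows.next();
            if (!singleTablePredicate.test(row)) {
                continue; // rejected rows never reach the hash table
            }
            table.computeIfAbsent(keyOf.apply(row), k -> new ArrayList<>()).add(row);
        }
        return table;
    }
}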
use of org.apache.derby.iapi.store.access.BackingStoreHashtable in project derby by apache.
the class T_QualifierTest method t_scanFetchHashtable.
/**
* Test scan which does FetchSet.
* <p>
* FetchSet() returns the entire result set in the hash table.
* <p>
*
* @return Whether the test succeeded or not.
*
* @exception StandardException Standard exception policy.
*/
public static boolean t_scanFetchHashtable(
        TransactionController tc,
        long conglomid,
        DataValueDescriptor[] fetch_template,
        DataValueDescriptor[] start_key,
        int start_op,
        Qualifier[][] qualifier,
        DataValueDescriptor[] stop_key,
        int stop_op,
        int expect_numrows,
        int input_expect_key,
        int order) throws StandardException, T_Fail {
HashSet set = null;
long key;
long numrows = 0;
boolean ordered = (order == ORDER_FORWARD || order == ORDER_DESC);
set = create_hash_set(input_expect_key, expect_numrows, order);
// select entire data set into a hash table, with first column key
int[] keyColumns = new int[1];
keyColumns[0] = 0;
BackingStoreHashtable result_set = tc.createBackingStoreHashtableFromScan(
        conglomid,
        0,
        TransactionController.MODE_TABLE,
        TransactionController.ISOLATION_SERIALIZABLE,
        (FormatableBitSet) null,
        start_key, start_op,
        qualifier,
        stop_key, stop_op,
        -1,         // no limit on total rows.
        keyColumns, // first column is hash key column
        false,      // don't remove duplicates
        -1,         // no estimate of rows
        -1,         // put it all into memory
        -1,         // use default initial capacity
        -1,         // use default load factor
        false,      // don't maintain runtime statistics
        false,      // don't skip null key columns
        false,      // don't keep after commit
        false);     // don't include row locations
// make sure the expected result set is the same as the actual result
// set.
Enumeration e = result_set.elements();
while (e.hasMoreElements()) {
Object obj;
DataValueDescriptor[] row = null;
if ((obj = e.nextElement()) instanceof DataValueDescriptor[]) {
row = (DataValueDescriptor[]) obj;
key = ((SQLLongint) (row[2])).getLong();
if (!set.remove(key)) {
return (fail("(t_scanFetchHashtable-obj) wrong key, expected (" + input_expect_key + ")" + "but got (" + key + ")."));
}
numrows++;
} else if (obj instanceof List) {
List row_vect = (List) obj;
for (int i = 0; i < row_vect.size(); i++) {
row = (DataValueDescriptor[]) row_vect.get(i);
key = ((SQLLongint) (row[2])).getLong();
if (!set.remove(key)) {
return (fail("(t_scanFetchHashtable-vector) wrong key, expected (" + input_expect_key + ")" + "but got (" + key + ")."));
}
numrows++;
}
} else {
return (fail("(t_scanFetchHashtable) got bad type for data: " + obj));
}
}
if (numrows != expect_numrows) {
return (fail("(t_scanFetchHashtable) wrong number of rows. Expected " + expect_numrows + " rows, but got " + numrows + "rows."));
}
result_set.close();
// select entire data set into a hash table, with key being
// the third column, which is the unique id used to verify the
// right result set is being returned.
// open a new scan
keyColumns[0] = 2;
result_set = tc.createBackingStoreHashtableFromScan(
        conglomid,
        0,
        TransactionController.MODE_TABLE,
        TransactionController.ISOLATION_SERIALIZABLE,
        (FormatableBitSet) null,
        start_key, start_op,
        qualifier,
        stop_key, stop_op,
        -1,         // no limit on total rows.
        keyColumns, // third column is hash key column
        false,      // don't remove duplicates
        -1,         // no estimate of rows
        -1,         // put it all into memory
        -1,         // use default initial capacity
        -1,         // use default load factor
        false,      // don't maintain runtime statistics
        false,      // don't skip null key columns
        false,      // don't keep after commit
        false);     // don't include row locations
Object removed_obj;
for (numrows = 0; numrows < expect_numrows; numrows++) {
long exp_key;
if (order == ORDER_DESC)
exp_key = input_expect_key - numrows;
else
exp_key = input_expect_key + numrows;
if ((removed_obj = result_set.remove(new SQLLongint(exp_key))) == null) {
fail("(t_scanFetchHashtable-2-vector) wrong key, expected (" + (exp_key) + ")" + "but did not find it.");
}
}
if (numrows != expect_numrows) {
return (fail("(t_scanFetchHashtable-2) wrong number of rows. Expected " + expect_numrows + " rows, but got " + numrows + "rows."));
}
return (true);
}
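Note the shape of what elements() hands back: a key with a single row maps to a bare DataValueDescriptor[], while a key with duplicates maps to a List of such arrays, which is why the loop above branches on the element's type. A small helper that normalizes either shape into a list, mirroring the branching the test already does (illustrative only; Object[] stands in for DataValueDescriptor[] to keep the sketch self-contained):

import java.util.Collections;
import java.util.List;

public final class HashtableElements {
    /** Normalize a hash table element into a list of rows. */
    @SuppressWarnings("unchecked")
    public static List<Object[]> asRowList(Object element) {
        if (element instanceof List) {
            return (List<Object[]>) element;                       // duplicate keys: already a list of rows
        }
        if (element instanceof Object[]) {
            return Collections.singletonList((Object[]) element);  // single row for this key
        }
        throw new IllegalArgumentException("unexpected element type: " + element);
    }
}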
use of org.apache.derby.iapi.store.access.BackingStoreHashtable in project derby by apache.
the class UpdateResultSet method notifyForUpdateCursor.
/* beetle 3865, updateable cursor use index. If the row we are updating has new value that
* falls into the direction of the index scan of the cursor, we save this rid into a hash table
* (for fast search), so that when the cursor hits it again, it knows to skip it.
*/
private void notifyForUpdateCursor(DataValueDescriptor[] row, DataValueDescriptor[] newBaseRow, RowLocation rl, TableScanResultSet tableScan) throws StandardException {
int[] indexCols = tableScan.indexCols;
int[] changedCols = constants.changedColumnIds;
boolean placedForward = false, ascending, decided = false, overlap = false;
int basePos, k;
/* first of all, we see if there's overlap between changed column ids and index key
* columns. If so, we see if the new update value falls into the future range of the
* index scan, if so, we need to save it in hash table.
*/
for (int i = 0; i < indexCols.length; i++) {
basePos = indexCols[i];
if (basePos > 0)
ascending = true;
else {
ascending = false;
basePos = -basePos;
}
for (int j = 0; j < changedCols.length; j++) {
if (basePos == changedCols[j]) {
// we pretty much decided if new row falls in front
// of the cursor or behind
decided = true;
/* the row and newBaseRow we get are compact base row that only have
* referenced columns. Our "basePos" is index in sparse heap row, so
* we need the BaseRowReadMap to map into the compact row.
*/
int[] map = constants.getBaseRowReadMap();
if (map == null)
k = basePos - 1;
else
k = map[basePos - 1];
DataValueDescriptor key = row[k];
/* Starting from the first index key column forward, we see if the direction
* of the update change is consistent with the direction of index scan.
* If so, we save it in hash table.
*/
if ((ascending && key.greaterThan(newBaseRow[k], key).equals(true)) || (!ascending && key.lessThan(newBaseRow[k], key).equals(true)))
placedForward = true;
else if (key.equals(newBaseRow[k], key).equals(true)) {
decided = false;
overlap = true;
}
break;
}
}
if (decided) // already decided if new row falls in front or behind
break;
}
/* If index row gets updated but key value didn't actually change, we still
* put it in hash table because it can either fall in front or behind. This
* can happen if the update explicitly sets a value, but same as old.
*/
if (overlap && !decided)
placedForward = true;
if (placedForward) { // add it to hash table
/* determining initial capacity of hash table from a few factors:
* (1) user specified MAX_MEMORY_PER_TABLE property, (2) min value 100
* (3) optimizer estimated row count. We want to avoid re-hashing if
* possible, for performance reason, yet don't waste space. If initial
* capacity is greater than max size divided by load factor, no rehash
* is ever needed.
*/
int maxCapacity = lcc.getOptimizerFactory().getMaxMemoryPerTable() / 16;
if (maxCapacity < 100)
maxCapacity = 100;
if (tableScan.past2FutureTbl == null) {
double rowCnt = tableScan.getEstimatedRowCount();
int initCapacity = 32 * 1024;
if (rowCnt > 0.0) {
// load factor
rowCnt = rowCnt / 0.75 + 1.0;
if (rowCnt < initCapacity)
initCapacity = (int) rowCnt;
}
if (maxCapacity < initCapacity)
initCapacity = maxCapacity;
tableScan.past2FutureTbl = new BackingStoreHashtable(
        tc,
        null,
        new int[] { 0 },
        false,
        -1,
        maxCapacity,
        initCapacity,
        -1,
        false,
        tableScan.getActivation().getResultSetHoldability());
}
/* Add the row location to the hash table.
*
* Need to save a clone because when we get cached currentRow, "rl"
* shares the same reference, so is changed at the same time.
*/
tableScan.past2FutureTbl.putRow(false, new DataValueDescriptor[] { rl.cloneValue(false) }, null);
}
}
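The capacity heuristic in the last branch is easy to miss: the cap is getMaxMemoryPerTable() / 16 but never below 100, and the initial capacity starts at 32 * 1024, is pulled down to estimatedRows / 0.75 + 1 when the optimizer has an estimate (so no rehash is ever needed at the default load factor), and is finally clamped to the cap. A standalone sketch of the same arithmetic (illustrative; the class and parameter names are invented):

public final class HashCapacityHeuristic {
    private static final double LOAD_FACTOR = 0.75;
    private static final int DEFAULT_INIT_CAPACITY = 32 * 1024;

    /** Initial capacity for the rid hash table, following the comments above. */
    public static int initialCapacity(int maxMemoryPerTable, double estimatedRowCount) {
        int maxCapacity = Math.max(maxMemoryPerTable / 16, 100);
        int initCapacity = DEFAULT_INIT_CAPACITY;
        if (estimatedRowCount > 0.0) {
            // capacity >= rows / loadFactor + 1 means the table never needs rehashing
            initCapacity = (int) Math.min(initCapacity, estimatedRowCount / LOAD_FACTOR + 1.0);
        }
        return Math.min(initCapacity, maxCapacity);
        // e.g. maxMemoryPerTable = 1048576, estimatedRowCount = 1000
        //      -> maxCapacity = 65536, initCapacity = 1334
    }
}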