
Example 1 with LocatedRow

Use of org.apache.derby.iapi.types.LocatedRow in the Apache Derby project.

From the class BackingStoreHashtable, method getEstimatedMemUsage.

/**
 * Take a value which will go into the hash table and return an estimate
 * of how much memory that value will consume. The hash value could
 * be either an array of columns or a LocatedRow.
 *
 * @param hashValue The object for which we want to know the memory usage.
 * @return A guess as to how much memory the current hash value will
 *  use.
 */
private long getEstimatedMemUsage(Object hashValue) {
    long rowMem = 0;
    DataValueDescriptor[] row = null;
    if (hashValue instanceof DataValueDescriptor[]) {
        row = (DataValueDescriptor[]) hashValue;
    } else {
        LocatedRow locatedRow = (LocatedRow) hashValue;
        row = locatedRow.columnValues();
        // account for the RowLocation size and class overhead
        RowLocation rowLocation = locatedRow.rowLocation();
        if (rowLocation != null) {
            rowMem += rowLocation.estimateMemoryUsage();
            rowMem += ClassSize.refSize;
        }
        // account for class overhead of the LocatedRow itself
        rowMem += ClassSize.refSize;
    }
    for (int i = 0; i < row.length; i++) {
        // account for the column's size and class overhead
        rowMem += row[i].estimateMemoryUsage();
        rowMem += ClassSize.refSize;
    }
    // account for the class overhead of the array itself
    rowMem += ClassSize.refSize;
    return rowMem;
}
Also used: LocatedRow (org.apache.derby.iapi.types.LocatedRow), DataValueDescriptor (org.apache.derby.iapi.types.DataValueDescriptor), RowLocation (org.apache.derby.iapi.types.RowLocation)
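
For reference, here is a minimal standalone sketch of the same per-column accounting pattern applied to a plain column array. The class name MemEstimateSketch and the sample values are illustrative only; the sketch assumes Derby's ClassSize utility (org.apache.derby.iapi.services.cache.ClassSize) and the SQLInteger/SQLVarchar wrappers, and is not part of BackingStoreHashtable.

import org.apache.derby.iapi.services.cache.ClassSize;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.types.SQLInteger;
import org.apache.derby.iapi.types.SQLVarchar;

public class MemEstimateSketch {
    // Mirrors the loop above: each column contributes its own estimate plus
    // one reference slot, and the array itself costs one more reference.
    static long estimateColumns(DataValueDescriptor[] row) {
        long mem = 0;
        for (DataValueDescriptor col : row) {
            mem += col.estimateMemoryUsage();
            mem += ClassSize.refSize;
        }
        return mem + ClassSize.refSize;
    }

    public static void main(String[] args) {
        DataValueDescriptor[] row = {
            new SQLInteger(42),
            new SQLVarchar("hello")
        };
        System.out.println("estimated bytes: " + estimateColumns(row));
    }
}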

Example 2 with LocatedRow

Use of org.apache.derby.iapi.types.LocatedRow in the Apache Derby project.

From the class BackingStoreHashtable, method makeDiskRow.

// end of spillToDisk
/**
 * <p>
 * Make a full set of columns from an object which is either already
 * an array of columns or else a LocatedRow. The full set of columns
 * is what's stored on disk when we spill to disk. This is the inverse of
 * makeInMemoryRow().
 * </p>
 */
private DataValueDescriptor[] makeDiskRow(Object raw) {
    DataValueDescriptor[] allColumns = null;
    if (includeRowLocations()) {
        LocatedRow locatedRow = (LocatedRow) raw;
        allColumns = makeDiskRow(locatedRow.columnValues(), locatedRow.rowLocation());
    } else {
        allColumns = (DataValueDescriptor[]) raw;
    }
    return allColumns;
}
Also used: LocatedRow (org.apache.derby.iapi.types.LocatedRow), DataValueDescriptor (org.apache.derby.iapi.types.DataValueDescriptor)
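
The inverse method makeInMemoryRow() mentioned in the comment is not included in this listing. Below is a plausible sketch of it, assuming the disk row stores the RowLocation as its last element (an assumption about the layout produced by the two-argument makeDiskRow overload, which is also not shown); the name makeInMemoryRowSketch is ours, not Derby's.

// Plausible inverse of makeDiskRow(), not the actual Derby implementation.
// Assumes the RowLocation sits in the last cell of the disk row.
private Object makeInMemoryRowSketch(DataValueDescriptor[] diskRow) {
    if (!includeRowLocations()) {
        // No row locations tracked: the disk row is already the in-memory shape.
        return diskRow;
    }
    DataValueDescriptor[] columns =
        java.util.Arrays.copyOf(diskRow, diskRow.length - 1);
    RowLocation location = (RowLocation) diskRow[diskRow.length - 1];
    return new LocatedRow(columns, location);
}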

Example 3 with LocatedRow

Use of org.apache.derby.iapi.types.LocatedRow in the Apache Derby project.

From the class BackingStoreHashtable, method spillToDisk.

// end of doSpaceAccounting
/**
 * Determine whether a new row should be spilled to disk and, if so, do it.
 *
 * @param columnValues  Actual columns from source row.
 * @param rowLocation   Optional row location.
 *
 * @return true if the row was spilled to disk, false if not
 *
 * @exception  StandardException  Standard exception policy.
 */
private boolean spillToDisk(DataValueDescriptor[] columnValues, RowLocation rowLocation) throws StandardException {
    // Once we have started spilling, all new rows will go to disk, even if we have
    // freed up some memory by moving duplicates to disk. This simplifies the
    // handling of duplicates and the accounting.
    DataValueDescriptor[] diskRow = null;
    if (diskHashtable == null) {
        if (max_inmemory_rowcnt > 0) {
            if (inmemory_rowcnt < max_inmemory_rowcnt) {
                // Do not spill
                return false;
            }
        } else if (max_inmemory_size > getEstimatedMemUsage(!includeRowLocations() ? columnValues : new LocatedRow(columnValues, rowLocation))) {
            return false;
        }
        // Want to start spilling
        diskRow = makeDiskRow(columnValues, rowLocation);
        diskHashtable = new DiskHashtable(
                tc,
                diskRow,
                // TODO-COLLATION, set non default collation if necessary.
                (int[]) null,
                key_column_numbers,
                remove_duplicates,
                keepAfterCommit);
    }
    Object key = KeyHasher.buildHashKey(columnValues, key_column_numbers);
    Object duplicateValue = hash_table.get(key);
    if (duplicateValue != null) {
        if (remove_duplicates) {
            // a degenerate case of spilling
            return true;
        }
        // This simplifies finding duplicates: they are either all in memory or all on disk.
        if (duplicateValue instanceof List) {
            List duplicateVec = (List) duplicateValue;
            for (int i = duplicateVec.size() - 1; i >= 0; i--) {
                diskHashtable.put(key, makeDiskRow(duplicateVec.get(i)));
            }
        } else {
            diskHashtable.put(key, makeDiskRow(duplicateValue));
        }
        hash_table.remove(key);
    }
    if (diskRow == null) {
        diskRow = makeDiskRow(columnValues, rowLocation);
    }
    diskHashtable.put(key, diskRow);
    return true;
}
Also used: LocatedRow (org.apache.derby.iapi.types.LocatedRow), ArrayList (java.util.ArrayList), List (java.util.List), DataValueDescriptor (org.apache.derby.iapi.types.DataValueDescriptor)
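
The two-argument makeDiskRow(columnValues, rowLocation) overload called above is not part of this listing. A plausible sketch follows, assuming the RowLocation is simply appended as one trailing element so the whole row can be handed to DiskHashtable as a plain DataValueDescriptor[]; the name makeDiskRowSketch is ours, not Derby's.

// Plausible sketch of the two-argument overload, not the actual Derby code.
// The cast assumes RowLocation can be stored like any other column value,
// as the code in Example 2 suggests.
private DataValueDescriptor[] makeDiskRowSketch(
        DataValueDescriptor[] columnValues, RowLocation rowLocation) {
    if (!includeRowLocations()) {
        return columnValues;
    }
    DataValueDescriptor[] diskRow =
        new DataValueDescriptor[columnValues.length + 1];
    System.arraycopy(columnValues, 0, diskRow, 0, columnValues.length);
    diskRow[columnValues.length] = (DataValueDescriptor) rowLocation;
    return diskRow;
}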

Example 4 with LocatedRow

Use of org.apache.derby.iapi.types.LocatedRow in the Apache Derby project.

From the class BackingStoreHashtable, method add_row_to_hash_table.

/**
 * Do the work to add one row to the hash table.
 * <p>
 *
 * @param columnValues  Row to add to the hash table.
 * @param rowLocation   Location of the row in the conglomerate; may be null.
 * @param needsToClone  Whether the row needs to be cloned.
 *
 * @exception  StandardException  Standard exception policy.
 */
private void add_row_to_hash_table(DataValueDescriptor[] columnValues, RowLocation rowLocation, boolean needsToClone) throws StandardException {
    if (spillToDisk(columnValues, rowLocation)) {
        return;
    }
    if (needsToClone) {
        columnValues = cloneRow(columnValues);
    }
    Object key = KeyHasher.buildHashKey(columnValues, key_column_numbers);
    Object hashValue = !includeRowLocations() ? columnValues : new LocatedRow(columnValues, rowLocation);
    Object duplicate_value = hash_table.put(key, hashValue);
    if (duplicate_value == null) {
        doSpaceAccounting(hashValue, false);
    } else {
        if (!remove_duplicates) {
            RowList row_vec;
            // inserted a duplicate
            if (duplicate_value instanceof RowList) {
                doSpaceAccounting(hashValue, false);
                row_vec = (RowList) duplicate_value;
            } else {
                // allocate list to hold duplicates
                row_vec = new RowList(2);
                // insert original row into vector
                row_vec.add(duplicate_value);
                doSpaceAccounting(hashValue, true);
            }
            // insert new row into list
            row_vec.add(hashValue);
            // store list of rows back into hash table,
            // overwriting the duplicate key that was
            // inserted.
            hash_table.put(key, row_vec);
        }
    }
}
Also used: LocatedRow (org.apache.derby.iapi.types.LocatedRow)
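
The RowList type used for duplicate handling above is not shown in these snippets. A minimal guess at its shape follows, assuming it is just a marker subclass of ArrayList so that instanceof can tell a bucket of duplicate rows apart from a single stored row; this is our reconstruction, not the Derby definition.

// Minimal guess at the RowList helper, not the actual Derby definition.
// Being a distinct subclass of ArrayList lets the code above distinguish a
// list of duplicate rows from a single row value with a plain instanceof.
private static class RowList extends java.util.ArrayList<Object> {
    RowList(int initialCapacity) {
        super(initialCapacity);
    }
}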

Aggregations

LocatedRow (org.apache.derby.iapi.types.LocatedRow): 4
DataValueDescriptor (org.apache.derby.iapi.types.DataValueDescriptor): 3
ArrayList (java.util.ArrayList): 1
List (java.util.List): 1
RowLocation (org.apache.derby.iapi.types.RowLocation): 1