Example use of org.hsqldb_voltpatches.lib.HashSet in project voltdb (by VoltDB): class StatementDML, method delete.
/**
 * Highest level multiple row delete method. Corresponds to an SQL
 * DELETE.
 *
 * @param session the session executing the statement
 * @param table   the table rows are deleted from
 * @param oldRows navigator over the set of rows to delete
 * @return the number of rows in oldRows, i.e. the delete count
 */
int delete(Session session, Table table, RowSetNavigator oldRows) {

    // Fast path: no foreign key constraint references this table as its
    // main table, so no cascade or referential-integrity bookkeeping is
    // needed -- delete directly and fire AFTER triggers.
    if (table.fkMainConstraints.length == 0) {
        deleteRows(session, table, oldRows);
        oldRows.beforeFirst();

        if (table.hasTrigger(Trigger.DELETE_AFTER)) {
            table.fireAfterTriggers(session, Trigger.DELETE_AFTER, oldRows);
        }
        return oldRows.getSize();
    }

    // Session-scoped scratch structures reused across statements:
    // 'path' records constraints already visited during a cascade walk
    // (it is cleared before each row), and 'tableUpdateList' collects
    // per-table row updates produced by cascade actions.
    HashSet path = session.sessionContext.getConstraintPath();
    HashMappedList tableUpdateList = session.sessionContext.getTableUpdateList();

    // First cascade pass with flag == false; second pass below repeats
    // with flag == true. NOTE(review): presumably the first pass only
    // verifies that the cascade is legal (throwing before any change)
    // and the second performs it -- confirm against checkCascadeDelete.
    if (session.database.isReferentialIntegrity()) {
        oldRows.beforeFirst();

        while (oldRows.hasNext()) {
            oldRows.next();

            Row row = oldRows.getCurrentRow();

            path.clear();
            checkCascadeDelete(session, table, tableUpdateList, row, false, path);
        }
    }

    // Second cascade pass (flag == true); see note above.
    if (session.database.isReferentialIntegrity()) {
        oldRows.beforeFirst();

        while (oldRows.hasNext()) {
            oldRows.next();

            Row row = oldRows.getCurrentRow();

            path.clear();
            checkCascadeDelete(session, table, tableUpdateList, row, true, path);
        }
    }

    // Delete the main rows themselves, skipping any row already removed
    // as a side effect of the cascade passes above.
    oldRows.beforeFirst();

    while (oldRows.hasNext()) {
        oldRows.next();

        Row row = oldRows.getCurrentRow();

        if (!row.isDeleted(session)) {
            table.deleteNoRefCheck(session, row);
        }
    }

    // Apply the accumulated per-table updates gathered during the
    // cascade passes, one target table at a time, then reset each list.
    for (int i = 0; i < tableUpdateList.size(); i++) {
        Table targetTable = (Table) tableUpdateList.getKey(i);
        HashMappedList updateList = (HashMappedList) tableUpdateList.get(i);

        if (updateList.size() > 0) {
            targetTable.updateRowSet(session, updateList, null, true);
            updateList.clear();
        }
    }

    // AFTER DELETE triggers fire once all deletes and cascade updates
    // are complete.
    oldRows.beforeFirst();

    if (table.hasTrigger(Trigger.DELETE_AFTER)) {
        table.fireAfterTriggers(session, Trigger.DELETE_AFTER, oldRows);
    }
    path.clear();

    return oldRows.getSize();
}
Example use of org.hsqldb_voltpatches.lib.HashSet in project voltdb (by VoltDB): class StatementDML, method update.
/**
 * Highest level multiple row update method. Corresponds to an SQL UPDATE.
 * To deal with unique constraints we need to perform all deletes at once
 * before the inserts. If there is a UNIQUE constraint violation limited
 * only to the duration of updating multiple rows, we don't want to abort
 * the operation. Example: UPDATE MYTABLE SET UNIQUECOL = UNIQUECOL + 1
 * After performing each cascade update, delete the main row. After all
 * cascade ops and deletes have been performed, insert new rows.<p>
 *
 * Following clauses from SQL Standard section 11.8 are enforced 9) Let ISS
 * be the innermost SQL-statement being executed. 10) If evaluation of these
 * General Rules during the execution of ISS would cause an update of some
 * site to a value that is distinct from the value to which that site was
 * previously updated during the execution of ISS, then an exception
 * condition is raised: triggered data change violation. 11) If evaluation
 * of these General Rules during the execution of ISS would cause deletion
 * of a row containing a site that is identified for replacement in that
 * row, then an exception condition is raised: triggered data change
 * violation. (fredt)
 *
 * @param session Session executing the statement
 * @param table Table being updated
 * @param updateList HashMappedList mapping each existing Row to its new
 *        column data array
 * @return int the number of rows updated
 */
int update(Session session, Table table, HashMappedList updateList) {

    // Session-scoped scratch structures: 'path' tracks constraints
    // visited during cascade checks, 'tableUpdateList' collects row
    // updates triggered on other tables.
    HashSet path = session.sessionContext.getConstraintPath();
    HashMappedList tableUpdateList = session.sessionContext.getTableUpdateList();

    // set identity column where null and check columns
    for (int i = 0; i < updateList.size(); i++) {
        Row row = (Row) updateList.getKey(i);
        Object[] data = (Object[]) updateList.get(i);

        /**
         * @todo 1.9.0 - make optional using database property - this means the identity column can be set to null to force
         * creation of a new identity value
         */
        table.setIdentityColumn(session, data);

        // BEFORE UPDATE triggers see old data (row.getData()) and the
        // proposed new data side by side.
        if (table.triggerLists[Trigger.UPDATE_BEFORE].length != 0) {
            table.fireBeforeTriggers(session, Trigger.UPDATE_BEFORE, row.getData(), data, updateColumnMap);
        }
        table.enforceRowConstraints(session, data);
    }

    // Views hold no data of their own; nothing further to do.
    if (table.isView) {
        return updateList.size();
    }

    // perform check/cascade operations
    if (session.database.isReferentialIntegrity()) {
        for (int i = 0; i < updateList.size(); i++) {
            Object[] data = (Object[]) updateList.get(i);
            Row row = (Row) updateList.getKey(i);

            // 'path' is passed uncleared here; checkCascadeUpdate is
            // expected to manage it per invocation.
            checkCascadeUpdate(session, table, tableUpdateList, row, data, updateColumnMap, null, path);
        }
    }

    // merge any triggered change to this table with the update list
    HashMappedList triggeredList = (HashMappedList) tableUpdateList.get(table);

    if (triggeredList != null) {
        for (int i = 0; i < triggeredList.size(); i++) {
            Row row = (Row) triggeredList.getKey(i);
            Object[] data = (Object[]) triggeredList.get(i);

            mergeKeepUpdate(session, updateList, updateColumnMap, table.colTypes, row, data);
        }
        triggeredList.clear();
    }

    // update lists - main list last
    for (int i = 0; i < tableUpdateList.size(); i++) {
        Table targetTable = (Table) tableUpdateList.getKey(i);
        HashMappedList updateListT = (HashMappedList) tableUpdateList.get(i);

        targetTable.updateRowSet(session, updateListT, null, true);
        updateListT.clear();
    }

    // Finally apply the main table's own updates.
    table.updateRowSet(session, updateList, updateColumnMap, false);
    path.clear();

    return updateList.size();
}
Example use of org.hsqldb_voltpatches.lib.HashSet in project voltdb (by VoltDB): class DatabaseInformationFull, method SYSTEM_CACHEINFO.
/**
 * Retrieves a <code>Table</code> object describing the current
 * state of all row caching objects for the accessible
 * tables defined within this database. <p>
 *
 * Currently, the row caching objects for which state is reported are: <p>
 *
 * <OL>
 * <LI> the system-wide <code>Cache</code> object used by CACHED tables.
 * <LI> any <code>TextCache</code> objects in use by [TEMP] TEXT tables.
 * </OL> <p>
 *
 * Each row is a cache object state description with the following
 * columns: <p>
 *
 * <pre class="SqlCodeExample">
 * CACHE_FILE          CHARACTER_DATA   absolute path of cache data file
 * MAX_CACHE_SIZE      INTEGER   maximum allowable cached Row objects
 * MAX_CACHE_BYTE_SIZE INTEGER   maximum allowable size of cached Row objects
 * CACHE_LENGTH        INTEGER   number of data bytes currently cached
 * CACHE_SIZE          INTEGER   number of rows currently cached
 * FREE_BYTES          INTEGER   total bytes in available file allocation units
 * FREE_COUNT          INTEGER   total # of allocation units available
 * FREE_POS            INTEGER   largest file position allocated + 1
 * </pre> <p>
 *
 * <b>Notes:</b> <p>
 *
 * <code>TextCache</code> objects do not maintain a free list because
 * deleted rows are only marked deleted and never reused. As such, the
 * columns FREE_BYTES, SMALLEST_FREE_ITEM, LARGEST_FREE_ITEM, and
 * FREE_COUNT are always reported as zero for rows reporting on
 * <code>TextCache</code> objects. <p>
 *
 * Currently, CACHE_SIZE, FREE_BYTES, SMALLEST_FREE_ITEM, LARGEST_FREE_ITEM,
 * FREE_COUNT and FREE_POS are the only dynamically changing values.
 * All others are constant for the life of a cache object. In a future
 * release, other column values may also change over the life of a cache
 * object, as SQL syntax may eventually be introduced to allow runtime
 * modification of certain cache properties. <p>
 *
 * @return a description of the current state of all row caching
 *      objects associated with the accessible tables of the database
 */
Table SYSTEM_CACHEINFO() {

    Table t = sysTables[SYSTEM_CACHEINFO];

    // First call: build the table definition only; data is produced on
    // subsequent calls.
    if (t == null) {
        t = createBlankTable(sysTableHsqlNames[SYSTEM_CACHEINFO]);

        // not null
        addColumn(t, "CACHE_FILE", CHARACTER_DATA);

        // not null
        addColumn(t, "MAX_CACHE_COUNT", CARDINAL_NUMBER);

        // not null
        addColumn(t, "MAX_CACHE_BYTES", CARDINAL_NUMBER);

        // not null
        addColumn(t, "CACHE_SIZE", CARDINAL_NUMBER);

        // not null
        addColumn(t, "CACHE_BYTES", CARDINAL_NUMBER);

        // not null
        addColumn(t, "FILE_FREE_BYTES", CARDINAL_NUMBER);

        // not null
        addColumn(t, "FILE_FREE_COUNT", CARDINAL_NUMBER);

        // not null
        addColumn(t, "FILE_FREE_POS", CARDINAL_NUMBER);

        HsqlName name = HsqlNameManager.newInfoSchemaObjectName(sysTableHsqlNames[SYSTEM_CACHEINFO].name, false, SchemaObject.INDEX);

        t.createPrimaryKey(name, new int[] { 0 }, true);

        return t;
    }

    // column number mappings
    final int icache_file      = 0;
    final int imax_cache_sz    = 1;
    final int imax_cache_bytes = 2;
    final int icache_size      = 3;
    final int icache_length    = 4;
    final int ifree_bytes      = 5;
    final int ifree_count      = 6;
    final int ifree_pos        = 7;

    // Store used to insert rows into this system table.
    PersistentStore store = database.persistentStoreCollection.getStore(t);
    Object[] row;

    // Collect the distinct DataFileCache objects backing the tables the
    // current grantee can fully access. HashSet membership deduplicates
    // the shared system-wide cache.
    HashSet cacheSet = new HashSet();

    // dynamic system tables are never cached
    Iterator tables = database.schemaManager.databaseObjectIterator(SchemaObject.TABLE);

    while (tables.hasNext()) {
        Table table = (Table) tables.next();

        if (!session.getGrantee().isFullyAccessibleByRole(table)) {
            continue;
        }

        // BUG FIX: look up the store of the iterated table, not of the
        // system table 't' itself -- the original getStore(t) meant the
        // loop never examined any user table's cache.
        PersistentStore currentStore = database.persistentStoreCollection.getStore(table);

        if (currentStore != null) {
            DataFileCache cache = currentStore.getCache();

            if (cache != null) {
                cacheSet.add(cache);
            }
        }
    }

    // Emit one row per distinct cache object.
    Iterator caches = cacheSet.iterator();

    while (caches.hasNext()) {
        DataFileCache cache = (DataFileCache) caches.next();

        row                   = t.getEmptyRowData();
        row[icache_file]      = FileUtil.getDefaultInstance().canonicalOrAbsolutePath(cache.getFileName());
        row[imax_cache_sz]    = ValuePool.getInt(cache.capacity());
        row[imax_cache_bytes] = ValuePool.getLong(cache.bytesCapacity());
        row[icache_size]      = ValuePool.getInt(cache.getCachedObjectCount());
        row[icache_length]    = ValuePool.getLong(cache.getTotalCachedBlockSize());
        row[ifree_bytes]      = ValuePool.getInt(cache.getTotalFreeBlockSize());
        row[ifree_count]      = ValuePool.getInt(cache.getFreeBlockCount());
        row[ifree_pos]        = ValuePool.getLong(cache.getFileFreePos());

        t.insertSys(store, row);
    }

    return t;
}
Example use of org.hsqldb_voltpatches.lib.HashSet in project voltdb (by VoltDB): class Grantee, method visibleGrantees.
/**
 * Returns the set of all grantees visible to this grantee, including
 * this grantee itself. <p>
 *
 * An admin grantee sees every grantee registered with the
 * GranteeManager; a regular grantee sees only itself plus every role
 * granted to it, directly or indirectly.
 *
 * @return a Set of visible Grantee objects
 */
public Set visibleGrantees() {

    HashSet visible = new HashSet();

    // Admins see everything; short-circuit with the full grantee list.
    if (isAdmin()) {
        visible.addAll(granteeManager.getGrantees());

        return visible;
    }

    // Non-admin: self plus the transitive closure of granted roles.
    visible.add(this);

    for (Iterator roles = getAllRoles().iterator(); roles.hasNext(); ) {
        visible.add(roles.next());
    }

    return visible;
}
Example use of org.hsqldb_voltpatches.lib.HashSet in project voltdb (by VoltDB): class DINameSpace, method iterateAllAccessibleMethods.
/** @todo - fredt - there are no class grants in 1.9 */
/**
 * Produces an iterator over all accessible class methods. Since class
 * grants were removed in 1.9 (see todo above), this currently returns
 * an empty iterator; the former per-class expansion is retained below
 * in commented-out form for reference.
 *
 * @param session The context in which to produce the iterator
 * @param andAliases true if the alias lists for the "ROUTINE" type method
 *   elements are to be generated.
 * @return a composite <code>Iterator</code> (currently always empty)
 */
Iterator iterateAllAccessibleMethods(Session session, boolean andAliases) {

    // Removed dead locals (classNameSet, classNames, methods, className)
    // that were only used by the disabled loop below.
    Iterator out = new WrapperIterator();

    /*
    while (classNames.hasNext()) {
        String className = (String) classNames.next();
        Iterator methods = iterateRoutineMethods(className, andAliases);

        out = new WrapperIterator(out, methods);
    }
    */
    return out;
}
Aggregations