Use of org.hsqldb_voltpatches.persist.PersistentStore in project voltdb by VoltDB.
Class Constraint, method checkInsert.
/**
 * Checks for foreign key or check constraint violation when
 * inserting a row into the child table.
 */
void checkInsert(Session session, Table table, Object[] row) {
    switch (constType) {
        case CHECK:
            if (!isNotNull) {
                checkCheckConstraint(session, table, row);
            }
            return;
        case FOREIGN_KEY:
            PersistentStore store =
                session.sessionData.getRowStore(core.mainTable);
            if (ArrayUtil.hasNull(row, core.refCols)) {
                if (core.matchType == OpTypes.MATCH_SIMPLE) {
                    return;
                }
                if (core.refCols.length == 1) {
                    return;
                }
                if (ArrayUtil.hasAllNull(row, core.refCols)) {
                    return;
                }
                // core.matchType == OpTypes.MATCH_FULL
            } else if (core.mainIndex.exists(session, store, row,
                                             core.refCols)) {
                return;
            } else if (core.mainTable == core.refTable) {
                // special case: self-referencing table and self-referencing row
                int compare = core.mainIndex.compareRowNonUnique(row,
                    core.refCols, row);
                if (compare == 0) {
                    return;
                }
            }
            String[] info = new String[] {
                core.refName.name, core.mainTable.getName().name
            };
            throw Error.error(ErrorCode.X_23502, ErrorCode.CONSTRAINT, info);
    }
}
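The early returns in the FOREIGN_KEY branch implement the SQL MATCH SIMPLE / MATCH FULL rules for referencing keys that contain NULLs. The standalone sketch below restates that decision logic in plain Java so it can be read and tested outside the Constraint class; the class, method, and enum names are illustrative and are not part of the HSQLDB API.
import java.util.Arrays;

final class FkNullRuleSketch {

    enum Outcome { ACCEPT_WITHOUT_LOOKUP, NEEDS_INDEX_LOOKUP, VIOLATION }

    /** Mirrors the NULL-handling branch of Constraint.checkInsert above. */
    static Outcome classify(Object[] row, int[] refCols, boolean matchSimple) {
        long nulls = Arrays.stream(refCols).filter(i -> row[i] == null).count();
        if (nulls == 0) {
            // a fully non-NULL key must be found in the referenced index
            return Outcome.NEEDS_INDEX_LOOKUP;
        }
        if (matchSimple || refCols.length == 1 || nulls == refCols.length) {
            // MATCH SIMPLE, a single-column key, or an all-NULL key is accepted
            return Outcome.ACCEPT_WITHOUT_LOOKUP;
        }
        // MATCH FULL with a partially NULL key fails without any lookup
        return Outcome.VIOLATION;
    }

    public static void main(String[] args) {
        int[] fk = { 0, 1 };
        System.out.println(classify(new Object[] { 1, 2 }, fk, false));        // NEEDS_INDEX_LOOKUP
        System.out.println(classify(new Object[] { 1, null }, fk, true));      // ACCEPT_WITHOUT_LOOKUP
        System.out.println(classify(new Object[] { 1, null }, fk, false));     // VIOLATION
        System.out.println(classify(new Object[] { null, null }, fk, false));  // ACCEPT_WITHOUT_LOOKUP
    }
}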
Use of org.hsqldb_voltpatches.persist.PersistentStore in project voltdb by VoltDB.
Class Constraint, method checkReferencedRows.
/**
 * Check used before creating a new foreign key constraint; this method
 * checks all rows of a table to ensure they all have a corresponding
 * row in the main table.
 */
void checkReferencedRows(Session session, Table table, int[] rowColArray) {
    Index mainIndex = getMainIndex();
    PersistentStore store = session.sessionData.getRowStore(table);
    RowIterator it = table.rowIterator(session);
    while (true) {
        Row row = it.getNextRow();
        if (row == null) {
            break;
        }
        Object[] rowData = row.getData();
        if (ArrayUtil.hasNull(rowData, rowColArray)) {
            if (core.matchType == OpTypes.MATCH_SIMPLE) {
                continue;
            }
        } else if (mainIndex.exists(session, store, rowData, rowColArray)) {
            continue;
        }
        if (ArrayUtil.hasAllNull(rowData, rowColArray)) {
            continue;
        }
        // build a diagnostic string of the offending key values
        // (not included in the error raised below)
        String colValues = "";
        for (int i = 0; i < rowColArray.length; i++) {
            Object o = rowData[rowColArray[i]];
            colValues += table.getColumnTypes()[rowColArray[i]].convertToString(o);
            colValues += ",";
        }
        String[] info = new String[] { getName().name, getMain().getName().name };
        throw Error.error(ErrorCode.X_23502, ErrorCode.CONSTRAINT, info);
    }
}
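checkReferencedRows performs the same existence test table-wide, typically when a foreign key is added to a table that already holds data. The sketch below shows the equivalent check over plain Java collections, which makes the MATCH SIMPLE skip and the all-NULL exemption easier to follow; the class and method names are illustrative only and not part of HSQLDB.
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

final class ReferencedRowsCheckSketch {

    /** Throws if any child key not exempted by the NULL rules is missing from parentKeys. */
    static void verify(List<Object[]> childRows, int[] fkCols,
                       Set<List<Object>> parentKeys, boolean matchSimple) {
        for (Object[] row : childRows) {
            List<Object> key = Arrays.stream(fkCols)
                                     .mapToObj(i -> row[i])
                                     .collect(Collectors.toList());
            boolean hasNull = key.stream().anyMatch(Objects::isNull);
            boolean allNull = key.stream().allMatch(Objects::isNull);
            if (hasNull && (matchSimple || allNull)) {
                continue;                    // exempt under the NULL rules
            }
            if (!hasNull && parentKeys.contains(key)) {
                continue;                    // referenced row exists
            }
            throw new IllegalStateException("foreign key violation: " + key);
        }
    }
}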
Use of org.hsqldb_voltpatches.persist.PersistentStore in project voltdb by VoltDB.
Class DatabaseInformationFull, method TRIGGERED_UPDATE_COLUMNS.
Table TRIGGERED_UPDATE_COLUMNS() {
    Table t = sysTables[TRIGGERED_UPDATE_COLUMNS];
    if (t == null) {
        t = createBlankTable(sysTableHsqlNames[TRIGGERED_UPDATE_COLUMNS]);
        addColumn(t, "TRIGGER_CATALOG", SQL_IDENTIFIER);
        addColumn(t, "TRIGGER_SCHEMA", SQL_IDENTIFIER);
        // not null
        addColumn(t, "TRIGGER_NAME", SQL_IDENTIFIER);
        // not null
        addColumn(t, "EVENT_OBJECT_CATALOG", SQL_IDENTIFIER);
        addColumn(t, "EVENT_OBJECT_SCHEMA", SQL_IDENTIFIER);
        addColumn(t, "EVENT_OBJECT_TABLE", SQL_IDENTIFIER);
        // not null
        addColumn(t, "EVENT_OBJECT_COLUMN", SQL_IDENTIFIER);
        HsqlName name = HsqlNameManager.newInfoSchemaObjectName(
            sysTableHsqlNames[TRIGGERED_UPDATE_COLUMNS].name, false,
            SchemaObject.INDEX);
        t.createPrimaryKey(name, new int[] { 0, 1, 2, 3, 4, 5, 6 }, false);
        return t;
    }
    PersistentStore store = database.persistentStoreCollection.getStore(t);
    // column number mappings
    final int trigger_catalog = 0;
    final int trigger_schema = 1;
    final int trigger_name = 2;
    final int event_object_catalog = 3;
    final int event_object_schema = 4;
    final int event_object_table = 5;
    final int event_object_column = 6;
    Iterator it;
    Object[] row;
    // Initialization
    it = database.schemaManager.databaseObjectIterator(SchemaObject.TRIGGER);
    while (it.hasNext()) {
        TriggerDef trigger = (TriggerDef) it.next();
        if (!session.getGrantee().isAccessible(trigger)) {
            continue;
        }
        int[] colIndexes = trigger.getUpdateColumnIndexes();
        if (colIndexes == null) {
            continue;
        }
        for (int i = 0; i < colIndexes.length; i++) {
            ColumnSchema column = trigger.getTable().getColumn(colIndexes[i]);
            row = t.getEmptyRowData();
            row[trigger_catalog] = database.getCatalogName().name;
            row[trigger_schema] = trigger.getSchemaName().name;
            row[trigger_name] = trigger.getName().name;
            row[event_object_catalog] = database.getCatalogName().name;
            row[event_object_schema] = trigger.getTable().getSchemaName().name;
            row[event_object_table] = trigger.getTable().getName().name;
            row[event_object_column] = column.getNameString();
            t.insertSys(store, row);
        }
    }
    return t;
}
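The rows produced above surface through the standard INFORMATION_SCHEMA. As a hedged sketch against a stock HSQLDB in-memory database (the JDBC URL, tables, and trigger below are assumptions for illustration, not taken from the VoltDB code), a column-level trigger shows up as one row per column named in its UPDATE OF list:
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class TriggeredUpdateColumnsDemo {
    public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection("jdbc:hsqldb:mem:demo", "SA", "");
             Statement st = c.createStatement()) {
            st.execute("CREATE TABLE customer (id INT PRIMARY KEY, firstname VARCHAR(20))");
            st.execute("CREATE TABLE audit (id INT)");
            st.execute("CREATE TRIGGER trig AFTER UPDATE OF firstname ON customer "
                     + "REFERENCING NEW ROW AS newrow FOR EACH ROW "
                     + "INSERT INTO audit VALUES (newrow.id)");
            try (ResultSet rs = st.executeQuery(
                    "SELECT TRIGGER_NAME, EVENT_OBJECT_TABLE, EVENT_OBJECT_COLUMN "
                  + "FROM INFORMATION_SCHEMA.TRIGGERED_UPDATE_COLUMNS")) {
                while (rs.next()) {
                    // expected (assuming default HSQLDB behaviour): TRIG CUSTOMER FIRSTNAME
                    System.out.println(rs.getString(1) + " " + rs.getString(2) + " "
                                     + rs.getString(3));
                }
            }
        }
    }
}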
Use of org.hsqldb_voltpatches.persist.PersistentStore in project voltdb by VoltDB.
Class DatabaseInformationFull, method SYSTEM_PROPERTIES.
/**
* Retrieves a <code>Table</code> object describing the capabilities
* and operating parameter properties for the engine hosting this
* database, as well as their applicability in terms of scope and
* name space. <p>
*
* Reported properties include certain predefined <code>Database</code>
* properties file values as well as certain database scope
* attributes. <p>
*
* It is intended that all <code>Database</code> attributes and
* properties that can be set via the database properties file,
* JDBC connection properties or SQL SET/ALTER statements will
* eventually be reported here or, where more applicable, in an
* ANSI/ISO conforming feature info base table in the definition
* schema. <p>
*
* Currently, the database properties reported are: <p>
*
* <OL>
* <LI>hsqldb.cache_file_scale - the scaling factor used to translate data and index structure file pointers
* <LI>hsqldb.cache_scale - base-2 exponent scaling allowable cache row count
* <LI>hsqldb.cache_size_scale - base-2 exponent scaling allowable cache byte count
* <LI>hsqldb.cache_version -
* <LI>hsqldb.catalogs - whether to report the database catalog (database uri)
* <LI>hsqldb.compatible_version -
* <LI>hsqldb.files_readonly - whether the database is in files_readonly mode
* <LI>hsqldb.gc_interval - # new records forcing gc ({0|NULL}=>never)
* <LI>hsqldb.max_nio_scale - scale factor for cache nio mapped buffers
* <LI>hsqldb.nio_data_file - whether cache uses nio mapped buffers
* <LI>hsqldb.original_version -
* <LI>sql.enforce_strict_size - column length specifications enforced strictly (raise exception on overflow)?
* <LI>textdb.all_quoted - default policy regarding whether to quote all character field values
* <LI>textdb.cache_scale - base-2 exponent scaling allowable cache row count
* <LI>textdb.cache_size_scale - base-2 exponent scaling allowable cache byte count
* <LI>textdb.encoding - default TEXT table file encoding
* <LI>textdb.fs - default field separator
* <LI>textdb.vs - default varchar field separator
* <LI>textdb.lvs - default long varchar field separator
* <LI>textdb.ignore_first - default policy regarding whether to ignore the first line
* <LI>textdb.quoted - default policy regarding treatment of character field values that _may_ require quoting
* <LI>IGNORECASE - create table VARCHAR_IGNORECASE?
* <LI>LOGSIZE - # bytes to which REDO log grows before auto-checkpoint
* <LI>REFERENTIAL_INTEGRITY - currently enforcing referential integrity?
* <LI>SCRIPTFORMAT - 0 : TEXT, 1 : BINARY, ...
* <LI>WRITEDELAY - does REDO log currently use buffered write strategy?
* </OL> <p>
*
* @return table describing database and session operating parameters
* and capabilities
*/
Table SYSTEM_PROPERTIES() {
    Table t = sysTables[SYSTEM_PROPERTIES];
    if (t == null) {
        t = createBlankTable(sysTableHsqlNames[SYSTEM_PROPERTIES]);
        addColumn(t, "PROPERTY_SCOPE", CHARACTER_DATA);
        addColumn(t, "PROPERTY_NAMESPACE", CHARACTER_DATA);
        addColumn(t, "PROPERTY_NAME", CHARACTER_DATA);
        addColumn(t, "PROPERTY_VALUE", CHARACTER_DATA);
        addColumn(t, "PROPERTY_CLASS", CHARACTER_DATA);
        // order PROPERTY_SCOPE, PROPERTY_NAMESPACE, PROPERTY_NAME
        // true PK
        HsqlName name = HsqlNameManager.newInfoSchemaObjectName(
            sysTableHsqlNames[SYSTEM_PROPERTIES].name, false,
            SchemaObject.INDEX);
        t.createPrimaryKey(name, new int[] { 0, 1, 2 }, true);
        return t;
    }
    // column number mappings
    final int iscope = 0;
    final int ins = 1;
    final int iname = 2;
    final int ivalue = 3;
    final int iclass = 4;
    //
    PersistentStore store = database.persistentStoreCollection.getStore(t);
    // calculated column values
    String scope;
    String nameSpace;
    // intermediate holders
    Object[] row;
    HsqlDatabaseProperties props;
    // First, we want the names and values for
    // all JDBC capabilities constants
    scope = "SESSION";
    props = database.getProperties();
    nameSpace = "database.properties";
    // boolean properties
    Iterator it = props.getUserDefinedPropertyData().iterator();
    while (it.hasNext()) {
        Object[] metaData = (Object[]) it.next();
        row = t.getEmptyRowData();
        row[iscope] = scope;
        row[ins] = nameSpace;
        row[iname] = metaData[HsqlProperties.indexName];
        row[ivalue] = props.getProperty((String) row[iname]);
        row[iclass] = metaData[HsqlProperties.indexClass];
        t.insertSys(store, row);
    }
    row = t.getEmptyRowData();
    row[iscope] = scope;
    row[ins] = nameSpace;
    row[iname] = "SCRIPTFORMAT";
    try {
        row[ivalue] =
            ScriptWriterBase.LIST_SCRIPT_FORMATS[database.logger.getScriptType()];
    } catch (Exception e) {
        // script type unavailable; PROPERTY_VALUE stays null
    }
    row[iclass] = "java.lang.String";
    t.insertSys(store, row);
    // write delay
    row = t.getEmptyRowData();
    row[iscope] = scope;
    row[ins] = nameSpace;
    row[iname] = "WRITE_DELAY";
    row[ivalue] = "" + database.logger.getWriteDelay();
    row[iclass] = "int";
    t.insertSys(store, row);
    // ignore case
    row = t.getEmptyRowData();
    row[iscope] = scope;
    row[ins] = nameSpace;
    row[iname] = "IGNORECASE";
    row[ivalue] = database.isIgnoreCase() ? "true" : "false";
    row[iclass] = "boolean";
    t.insertSys(store, row);
    // referential integrity
    row = t.getEmptyRowData();
    row[iscope] = scope;
    row[ins] = nameSpace;
    row[iname] = "REFERENTIAL_INTEGRITY";
    row[ivalue] = database.isReferentialIntegrity() ? "true" : "false";
    row[iclass] = "boolean";
    t.insertSys(store, row);
    return t;
}
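Since SYSTEM_PROPERTIES carries only five character columns, the simplest way to see what a given build reports is a straight query. The snippet below is a minimal sketch against a stock HSQLDB in-memory database; the JDBC URL is an assumption, and the property list will differ between the stock engine and the voltpatched one.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SystemPropertiesDemo {
    public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection("jdbc:hsqldb:mem:demo", "SA", "");
             Statement st = c.createStatement();
             ResultSet rs = st.executeQuery(
                 "SELECT PROPERTY_SCOPE, PROPERTY_NAMESPACE, PROPERTY_NAME, PROPERTY_VALUE "
               + "FROM INFORMATION_SCHEMA.SYSTEM_PROPERTIES ORDER BY PROPERTY_NAME")) {
            while (rs.next()) {
                // one line per reported engine/session property
                System.out.printf("%s %s %s = %s%n", rs.getString(1), rs.getString(2),
                                  rs.getString(3), rs.getString(4));
            }
        }
    }
}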
Use of org.hsqldb_voltpatches.persist.PersistentStore in project voltdb by VoltDB.
Class DatabaseInformationFull, method VIEW_TABLE_USAGE.
/**
* The VIEW_TABLE_USAGE table has one row for each table identified
* by a <table name> simply contained in a <table reference>
* that is contained in the <query expression> of a view. <p>
*
* <b>Definition</b><p>
*
* <pre class="SqlCodeExample">
* CREATE TABLE SYSTEM_VIEW_TABLE_USAGE (
* VIEW_CATALOG VARCHAR NULL,
* VIEW_SCHEMA VARCHAR NULL,
* VIEW_NAME VARCHAR NULL,
* TABLE_CATALOG VARCHAR NULL,
* TABLE_SCHEMA VARCHAR NULL,
* TABLE_NAME VARCHAR NULL,
* UNIQUE( VIEW_CATALOG, VIEW_SCHEMA, VIEW_NAME,
* TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME )
* )
* </pre>
*
* <b>Description:</b><p>
*
* <ol>
* <li> The values of VIEW_CATALOG, VIEW_SCHEMA, and VIEW_NAME are the
* catalog name, unqualified schema name, and qualified identifier,
* respectively, of the view being described. <p>
*
* <li> The values of TABLE_CATALOG, TABLE_SCHEMA, and TABLE_NAME are the
* catalog name, unqualified schema name, and qualified identifier,
* respectively, of a table identified by a <table name>
* simply contained in a <table reference> that is contained in
* the <query expression> of the view being described.
* </ol>
*
* @return Table
*/
Table VIEW_TABLE_USAGE() {
    Table t = sysTables[VIEW_TABLE_USAGE];
    if (t == null) {
        t = createBlankTable(sysTableHsqlNames[VIEW_TABLE_USAGE]);
        addColumn(t, "VIEW_CATALOG", SQL_IDENTIFIER);
        addColumn(t, "VIEW_SCHEMA", SQL_IDENTIFIER);
        // not null
        addColumn(t, "VIEW_NAME", SQL_IDENTIFIER);
        addColumn(t, "TABLE_CATALOG", SQL_IDENTIFIER);
        addColumn(t, "TABLE_SCHEMA", SQL_IDENTIFIER);
        // not null
        addColumn(t, "TABLE_NAME", SQL_IDENTIFIER);
        // false PK, as VIEW_CATALOG, VIEW_SCHEMA, TABLE_CATALOG, and/or
        // TABLE_SCHEMA may be NULL
        HsqlName name = HsqlNameManager.newInfoSchemaObjectName(
            sysTableHsqlNames[VIEW_TABLE_USAGE].name, false,
            SchemaObject.INDEX);
        t.createPrimaryKey(name, new int[] { 0, 1, 2, 3, 4, 5 }, false);
        return t;
    }
    // Column number mappings
    final int view_catalog = 0;
    final int view_schema = 1;
    final int view_name = 2;
    final int table_catalog = 3;
    final int table_schema = 4;
    final int table_name = 5;
    //
    PersistentStore store = database.persistentStoreCollection.getStore(t);
    Iterator tables;
    Table table;
    Object[] row;
    // Initialization
    tables = database.schemaManager.databaseObjectIterator(SchemaObject.TABLE);
    // Do it.
    while (tables.hasNext()) {
        table = (Table) tables.next();
        if (table.isView()
                && session.getGrantee().isFullyAccessibleByRole(table)) {
            // $FALL-THROUGH$
        } else {
            continue;
        }
        OrderedHashSet references = table.getReferences();
        for (int i = 0; i < references.size(); i++) {
            HsqlName refName = (HsqlName) references.get(i);
            if (!session.getGrantee().isFullyAccessibleByRole(refName)) {
                continue;
            }
            if (refName.type != SchemaObject.TABLE) {
                continue;
            }
            row = t.getEmptyRowData();
            row[view_catalog] = database.getCatalogName().name;
            row[view_schema] = table.getSchemaName().name;
            row[view_name] = table.getName().name;
            row[table_catalog] = database.getCatalogName().name;
            row[table_schema] = refName.schema.name;
            row[table_name] = refName.name;
            try {
                t.insertSys(store, row);
            } catch (HsqlException e) {
                // duplicate usage rows (the same table referenced more than
                // once by a view) violate the primary key and are skipped
            }
        }
    }
    return t;
}
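VIEW_TABLE_USAGE only gains rows once a view actually references a base table, and only for objects the current grantee fully owns. A minimal sketch, assuming a stock HSQLDB in-memory database and the illustrative object names below:
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ViewTableUsageDemo {
    public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection("jdbc:hsqldb:mem:demo", "SA", "");
             Statement st = c.createStatement()) {
            st.execute("CREATE TABLE orders (id INT PRIMARY KEY, total DECIMAL(10,2))");
            st.execute("CREATE VIEW big_orders AS SELECT * FROM orders WHERE total > 100");
            try (ResultSet rs = st.executeQuery(
                    "SELECT VIEW_NAME, TABLE_SCHEMA, TABLE_NAME "
                  + "FROM INFORMATION_SCHEMA.VIEW_TABLE_USAGE")) {
                while (rs.next()) {
                    // expected (default PUBLIC schema): BIG_ORDERS PUBLIC ORDERS
                    System.out.println(rs.getString(1) + " " + rs.getString(2) + " "
                                     + rs.getString(3));
                }
            }
        }
    }
}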