Use of org.apache.derby.iapi.sql.dictionary.DataDictionary in the Apache Derby project: class SecurityUtil, method authorize.
/**
 * Raise an exception if the current user does not have permission
 * to perform the indicated operation. A no-op when the connection is
 * not running with SQL authorization enabled.
 *
 * @param operation the securable operation being attempted
 * @throws StandardException if the current user lacks EXECUTE permission
 *         on the system routine backing the operation
 */
public static void authorize(Securable operation) throws StandardException {
    LanguageConnectionContext lcc = (LanguageConnectionContext) getContextOrNull(LanguageConnectionContext.CONTEXT_ID);
    if (!lcc.usesSqlAuthorization()) {
        // Nothing to check when SQL authorization is off.
        return;
    }
    // Resolve the system routine that implements this operation.
    DataDictionary dataDictionary = lcc.getDataDictionary();
    AliasDescriptor routine = dataDictionary.getRoutineList(operation.routineSchemaID, operation.routineName, operation.routineType).get(0);
    // The only permission needed is EXECUTE on that routine.
    ArrayList<StatementPermission> neededPermissions = new ArrayList<StatementPermission>();
    neededPermissions.add(new StatementRoutinePermission(routine.getObjectID()));
    lcc.getAuthorizer().authorize(neededPermissions, lcc.getLastActivation());
}
Use of org.apache.derby.iapi.sql.dictionary.DataDictionary in the Apache Derby project: class AuthenticationServiceBase, method validate.
/**
 * Validate a proposed database property change before it is persisted.
 * Rejects illegal authentication-related settings by throwing; otherwise
 * reports whether the key needs remapping.
 *
 * @see PropertySetCallback#validate
 *
 * @param key   the property name being set
 * @param value the proposed new value (a String for every key inspected here)
 * @param p     the current persistent property set (used to read old values)
 * @return true if the key is a user-password property that must be remapped,
 *         false otherwise
 * @throws StandardException if the proposed change is not allowed
 */
public boolean validate(String key, Serializable value, Dictionary p) throws StandardException {
// user password properties need to be remapped. nothing else needs remapping.
if (key.startsWith(org.apache.derby.shared.common.reference.Property.USER_PROPERTY_PREFIX)) {
return true;
}
// Every key checked below carries a String value.
String stringValue = (String) value;
boolean settingToNativeLocal = Property.AUTHENTICATION_PROVIDER_NATIVE_LOCAL.equals(stringValue);
if (Property.AUTHENTICATION_PROVIDER_PARAMETER.equals(key)) {
// NATIVE + LOCAL is the only value of this property which can be persisted
// NOTE(review): this rejection reuses PROPERTY_DBO_LACKS_CREDENTIALS for any
// non-LOCAL NATIVE setting — confirm that is the intended SQLState here.
if ((stringValue != null) && (stringValue.startsWith(Property.AUTHENTICATION_PROVIDER_NATIVE)) && !settingToNativeLocal) {
throw StandardException.newException(SQLState.PROPERTY_DBO_LACKS_CREDENTIALS);
}
// once set to NATIVE authentication, you can't change it
String oldValue = (String) p.get(Property.AUTHENTICATION_PROVIDER_PARAMETER);
if ((oldValue != null) && oldValue.startsWith(Property.AUTHENTICATION_PROVIDER_NATIVE)) {
throw StandardException.newException(SQLState.PROPERTY_CANT_UNDO_NATIVE);
}
// Switching to NATIVE::LOCAL requires that the database owner already has
// stored credentials in SYSUSERS,
// because you can't store credentials in a pre-10.9 database.
if (settingToNativeLocal) {
DataDictionary dd = getDataDictionary();
String dbo = dd.getAuthorizationDatabaseOwner();
UserDescriptor userCredentials = dd.getUser(dbo);
if (userCredentials == null) {
throw StandardException.newException(SQLState.PROPERTY_DBO_LACKS_CREDENTIALS);
}
}
}
// Password lifetime must parse; a null parse result means the value is malformed.
if (Property.AUTHENTICATION_NATIVE_PASSWORD_LIFETIME.equals(key)) {
if (parsePasswordLifetime(stringValue) == null) {
throw StandardException.newException(SQLState.BAD_PASSWORD_LIFETIME, Property.AUTHENTICATION_NATIVE_PASSWORD_LIFETIME);
}
}
// NOTE(review): the expiration threshold reuses the BAD_PASSWORD_LIFETIME
// SQLState (with its own property name as argument) — confirm intentional.
if (Property.AUTHENTICATION_PASSWORD_EXPIRATION_THRESHOLD.equals(key)) {
if (parsePasswordThreshold(stringValue) == null) {
throw StandardException.newException(SQLState.BAD_PASSWORD_LIFETIME, Property.AUTHENTICATION_PASSWORD_EXPIRATION_THRESHOLD);
}
}
// Not a user-password property: no remapping needed.
return false;
}
Use of org.apache.derby.iapi.sql.dictionary.DataDictionary in the Apache Derby project: class IndexStatisticsDaemonImpl, method updateIndexStatsMinion.
/**
 * Updates the index statistics for the given table and the specified
 * indexes.
 * <p>
 * <strong>API note</strong>: Using {@code null} to update the statistics
 * for all conglomerates is preferred over explicitly passing an array with
 * all the conglomerates for the table. Doing so allows for some
 * optimizations, and will cause a disposable statistics check to be
 * performed.
 *
 * @param lcc language connection context used to perform the work
 * @param td the table to update index stats for
 * @param cds the conglomerates to update statistics for (non-index
 * conglomerates will be ignored), {@code null} means all indexes
 * @param asBackgroundTask whether the updates are done automatically as
 * part of a background task or if explicitly invoked by the user
 * @throws StandardException if something goes wrong
 */
private void updateIndexStatsMinion(LanguageConnectionContext lcc, TableDescriptor td, ConglomerateDescriptor[] cds, boolean asBackgroundTask) throws StandardException {
// can only properly identify disposable stats if cds == null,
// which means we are processing all indexes on the conglomerate.
final boolean identifyDisposableStats = (cds == null);
// Fetch descriptors if we're updating statistics for all indexes.
if (cds == null) {
cds = td.getConglomerateDescriptors();
}
// Extract/derive information from the table descriptor
long[] conglomerateNumber = new long[cds.length];
ExecIndexRow[] indexRow = new ExecIndexRow[cds.length];
TransactionController tc = lcc.getTransactionExecute();
// Open the heap at record-level locking; background work reads uncommitted
// (presumably to minimize interference with user transactions — confirm),
// explicit user invocation uses repeatable read.
ConglomerateController heapCC = tc.openConglomerate(td.getHeapConglomerateId(), false, 0, TransactionController.MODE_RECORD, asBackgroundTask ? TransactionController.ISOLATION_READ_UNCOMMITTED : TransactionController.ISOLATION_REPEATABLE_READ);
// create a list of indexes that should have statistics, by looking
// at all indexes on the conglomerate, and conditionally skipping
// unique single column indexes. This set is the "non disposable
// stat list".
UUID[] non_disposable_objectUUID = new UUID[cds.length];
try {
for (int i = 0; i < cds.length; i++) {
// Skip non-index conglomerates
if (!cds[i].isIndex()) {
conglomerateNumber[i] = -1;
continue;
}
IndexRowGenerator irg = cds[i].getIndexDescriptor();
// Skip single-column unique indexes when configured to do so,
// or we are running in soft-upgrade-mode on a pre 10.9 db.
if (skipDisposableStats) {
if (irg.isUnique() && irg.numberOfOrderedColumns() == 1) {
conglomerateNumber[i] = -1;
continue;
}
}
// at this point have found a stat for an existing
// index which is not a single column unique index, add it
// to the list of "non disposable stats"
conglomerateNumber[i] = cds[i].getConglomerateNumber();
non_disposable_objectUUID[i] = cds[i].getUUID();
indexRow[i] = irg.getNullIndexRow(td.getColumnDescriptorList(), heapCC.newRowLocationTemplate());
}
} finally {
// Always release the heap controller, even if descriptor processing fails.
heapCC.close();
}
// Drop statistics entries that no longer map to any surviving index
// ("orphaned"/disposable stats).
if (identifyDisposableStats) {
// Note this loop is not controlled by the skipDisposableStats
// flag. The above loop controls if we drop single column unique
// index stats or not. In all cases we are going to drop
// stats with no associated index (orphaned stats).
List<StatisticsDescriptor> existingStats = td.getStatistics();
StatisticsDescriptor[] stats = (StatisticsDescriptor[]) existingStats.toArray(new StatisticsDescriptor[existingStats.size()]);
// A stats entry is valid only if its referenced index UUID matches
// those entries that don't have a matching conglomerate in the
for (int si = 0; si < stats.length; si++) {
UUID referencedIndex = stats[si].getReferenceID();
boolean isValid = false;
for (int ci = 0; ci < conglomerateNumber.length; ci++) {
if (referencedIndex.equals(non_disposable_objectUUID[ci])) {
isValid = true;
break;
}
}
// Log the drop so it can be audited later; this acts as a safety
// mechanism in case of another bug like DERBY-5681 in Derby.
if (!isValid) {
String msg = "dropping disposable statistics entry " + stats[si].getUUID() + " for index " + stats[si].getReferenceID() + " (cols=" + stats[si].getColumnCount() + ")";
logAlways(td, null, msg);
trace(1, msg + " on table " + stats[si].getTableUUID());
DataDictionary dd = lcc.getDataDictionary();
// Dropping descriptors requires the data dictionary to be in
// write mode; enter it if we aren't already.
if (!lcc.dataDictionaryInWriteMode()) {
dd.startWriting(lcc);
}
dd.dropStatisticsDescriptors(td.getUUID(), stats[si].getReferenceID(), tc);
// In background mode commit after each drop — presumably to
// release locks promptly; confirm against daemon design notes.
if (asBackgroundTask) {
lcc.internalCommit(true);
}
}
}
}
// [x][0] = conglomerate number, [x][1] = start time, [x][2] = stop time
long[][] scanTimes = new long[conglomerateNumber.length][3];
int sci = 0;
for (int indexNumber = 0; indexNumber < conglomerateNumber.length; indexNumber++) {
// -1 marks conglomerates skipped above (non-index or disposable).
if (conglomerateNumber[indexNumber] == -1)
continue;
// Check if daemon has been disabled.
if (asBackgroundTask) {
if (isShuttingDown()) {
break;
}
}
scanTimes[sci][0] = conglomerateNumber[indexNumber];
scanTimes[sci][1] = System.currentTimeMillis();
// Subtract one for the RowLocation added for indexes.
int numCols = indexRow[indexNumber].nColumns() - 1;
// cardinality[j] counts distinct prefixes of length j+1 seen so far.
long[] cardinality = new long[numCols];
KeyComparator cmp = new KeyComparator(indexRow[indexNumber]);
/* Read uncommitted, with record locking. Actually CS store may
not hold record locks */
GroupFetchScanController gsc = tc.openGroupFetchScan(conglomerateNumber[indexNumber], // hold
false, 0, // locking
TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED, // scancolumnlist-- want everything.
null, // startkeyvalue-- start from the beginning.
null, 0, // qualifiers, none!
null, // stopkeyvalue,
null, 0);
try {
int rowsFetched = 0;
boolean giving_up_on_shutdown = false;
while ((rowsFetched = cmp.fetchRows(gsc)) > 0) {
// Check for shutdown between fetch groups — a convenient point
// since fetchRows is the unit of
// I/O that is processed as a convenient point.
if (asBackgroundTask) {
if (isShuttingDown()) {
giving_up_on_shutdown = true;
break;
}
}
// When the key differs from the previous one at position p,
// every prefix of length > p gains a new distinct value.
for (int i = 0; i < rowsFetched; i++) {
int whichPositionChanged = cmp.compareWithPrevKey(i);
if (whichPositionChanged >= 0) {
for (int j = whichPositionChanged; j < numCols; j++) cardinality[j]++;
}
}
}
if (giving_up_on_shutdown)
break;
// Piggyback an updated row-count estimate on the completed scan.
gsc.setEstimatedRowCount(cmp.getRowCount());
} finally // try
{
gsc.close();
// Defensive: prevent any accidental reuse after close.
gsc = null;
}
scanTimes[sci++][2] = System.currentTimeMillis();
// We have scanned the indexes, so let's give this a few attempts
// before giving up.
int retries = 0;
while (true) {
try {
writeUpdatedStats(lcc, td, non_disposable_objectUUID[indexNumber], cmp.getRowCount(), cardinality, asBackgroundTask);
break;
} catch (StandardException se) {
retries++;
// Retry up to 3 times on lock timeout with linear backoff;
// rethrow anything else, or
// o too many lock timeouts
if (se.isLockTimeout() && retries < 3) {
trace(2, "lock timeout when writing stats, retrying");
sleep(100 * retries);
} else {
throw se;
}
}
}
}
log(asBackgroundTask, td, fmtScanTimes(scanTimes));
}
Use of org.apache.derby.iapi.sql.dictionary.DataDictionary in the Apache Derby project: class FromBaseTable, method pushIndexName.
/* Helper used by generateMaxSpecialResultSet and generateDistinctScan:
 * pushes the display name of the given conglomerate onto the method
 * builder — the constraint name for a constraint-backed index, the
 * conglomerate name for a plain index, and null otherwise.
 * @param cd Conglomerate for which we need to push the index name
 * @param mb Associated MethodBuilder
 * @throws StandardException
 */
private void pushIndexName(ConglomerateDescriptor cd, MethodBuilder mb) throws StandardException {
    if (cd.isConstraint()) {
        // Backing index of a constraint: report the constraint's own name.
        ConstraintDescriptor constraint = getDataDictionary().getConstraintDescriptor(tableDescriptor, cd.getUUID());
        mb.push(constraint.getConstraintName());
        return;
    }
    if (cd.isIndex()) {
        mb.push(cd.getConglomerateName());
        return;
    }
    // If the conglomerate is the base table itself, make sure we push
    // null. Before the fix for DERBY-578, we would push the base table
    // name and this was just plain wrong and would cause statistics
    // information to be incorrect.
    mb.pushNull("java.lang.String");
}
Use of org.apache.derby.iapi.sql.dictionary.DataDictionary in the Apache Derby project: class CreateTableNode, method bindStatement.
// We inherit the generate() method from DDLStatementNode.
/**
 * Bind this CreateTableNode. This means doing any static error checking that can be
 * done before actually creating the base table or declaring the global temporary table.
 * For eg, verifying that the TableElementList does not contain any duplicate column names.
 *
 * @exception StandardException Thrown on error
 */
@Override
public void bindStatement() throws StandardException {
DataDictionary dataDictionary = getDataDictionary();
int numPrimaryKeys;
int numCheckConstraints;
int numReferenceConstraints;
int numUniqueConstraints;
int numGenerationClauses;
// NOTE(review): the first argument is false for global temporary tables —
// presumably they resolve to the session schema; confirm against
// getSchemaDescriptor's contract.
SchemaDescriptor sd = getSchemaDescriptor(tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE, true);
// CREATE TABLE ... AS <query> WITH NO DATA: derive the column list from
// the bound query expression instead of an explicit element list.
if (queryExpression != null) {
FromList fromList = new FromList(getOptimizerFactory().doJoinOrderOptimization(), getContextManager());
CompilerContext cc = getCompilerContext();
ProviderList prevAPL = cc.getCurrentAuxiliaryProviderList();
ProviderList apl = new ProviderList();
try {
cc.setCurrentAuxiliaryProviderList(apl);
// Binding the source query requires SELECT privilege.
cc.pushCurrentPrivType(Authorizer.SELECT_PRIV);
/* Bind the tables in the queryExpression */
queryExpression = queryExpression.bindNonVTITables(dataDictionary, fromList);
queryExpression = queryExpression.bindVTITables(fromList);
/* Bind the expressions under the resultSet */
queryExpression.bindExpressions(fromList);
/* Bind the query expression */
queryExpression.bindResultColumns(fromList);
/* Reject any untyped nulls in the RCL */
/* e.g. CREATE TABLE t1 (x) AS VALUES NULL WITH NO DATA */
queryExpression.bindUntypedNullsToResultColumns(null);
} finally {
// Restore the compiler context state even if binding throws.
cc.popCurrentPrivType();
cc.setCurrentAuxiliaryProviderList(prevAPL);
}
/* If there is an RCL for the table definition then copy the
 * names to the queryExpression's RCL after verifying that
 * they both have the same size.
 */
ResultColumnList qeRCL = queryExpression.getResultColumns();
if (resultColumns != null) {
if (resultColumns.size() != qeRCL.visibleSize()) {
throw StandardException.newException(SQLState.LANG_TABLE_DEFINITION_R_C_L_MISMATCH, getFullName());
}
qeRCL.copyResultColumnNames(resultColumns);
}
int schemaCollationType = sd.getCollationType();
/* Create table element list from columns in query expression */
tableElementList = new TableElementList(getContextManager());
for (ResultColumn rc : qeRCL) {
// Skip generated (non-visible) columns from the source query.
if (rc.isGenerated()) {
continue;
}
/* Raise error if column name is system generated. */
if (rc.isNameGenerated()) {
throw StandardException.newException(SQLState.LANG_TABLE_REQUIRES_COLUMN_NAMES);
}
DataTypeDescriptor dtd = rc.getExpression().getTypeServices();
if ((dtd != null) && !dtd.isUserCreatableType()) {
throw StandardException.newException(SQLState.LANG_INVALID_COLUMN_TYPE_CREATE_TABLE, dtd.getFullSQLTypeName(), rc.getName());
}
// Reject string columns whose collation differs from the schema's —
// can happen when the query pulls columns from another schema in
// a territory based database.
if (dtd.getTypeId().isStringTypeId() && dtd.getCollationType() != schemaCollationType) {
throw StandardException.newException(SQLState.LANG_CAN_NOT_CREATE_TABLE, dtd.getCollationName(), DataTypeDescriptor.getCollationName(schemaCollationType));
}
ColumnDefinitionNode column = new ColumnDefinitionNode(rc.getName(), null, rc.getType(), null, getContextManager());
tableElementList.addTableElement(column);
}
} else {
// Set the collation type and collation derivation of all the
// character type columns. Their collation type will be same as the
// collation of the schema they belong to. Their collation
// derivation will be "implicit".
// Earlier we did this in makeConstantAction but that is little too
// late (DERBY-2955)
// eg
// CREATE TABLE STAFF9 (EMPNAME CHAR(20),
// CONSTRAINT STAFF9_EMPNAME CHECK (EMPNAME NOT LIKE 'T%'))
// For the query above, when run in a territory based db, we need
// to have the correct collation set in bind phase of create table
// so that when LIKE is handled in LikeEscapeOperatorNode, we have
// the correct collation set for EMPNAME otherwise it will throw an
// exception for 'T%' having collation of territory based and
// EMPNAME having the default collation of UCS_BASIC
tableElementList.setCollationTypesOnCharacterStringColumns(getSchemaDescriptor(tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE, true));
}
// Static validation of the element list (duplicate columns, etc.).
tableElementList.validate(this, dataDictionary, (TableDescriptor) null);
/* Only 1012 columns allowed per table */
if (tableElementList.countNumberOfColumns() > Limits.DB2_MAX_COLUMNS_IN_TABLE) {
throw StandardException.newException(SQLState.LANG_TOO_MANY_COLUMNS_IN_TABLE_OR_VIEW, String.valueOf(tableElementList.countNumberOfColumns()), getRelativeName(), String.valueOf(Limits.DB2_MAX_COLUMNS_IN_TABLE));
}
numPrimaryKeys = tableElementList.countConstraints(DataDictionary.PRIMARYKEY_CONSTRAINT);
/* Only 1 primary key allowed per table */
if (numPrimaryKeys > 1) {
throw StandardException.newException(SQLState.LANG_TOO_MANY_PRIMARY_KEY_CONSTRAINTS, getRelativeName());
}
/* Check the validity of all check constraints */
numCheckConstraints = tableElementList.countConstraints(DataDictionary.CHECK_CONSTRAINT);
numReferenceConstraints = tableElementList.countConstraints(DataDictionary.FOREIGNKEY_CONSTRAINT);
numUniqueConstraints = tableElementList.countConstraints(DataDictionary.UNIQUE_CONSTRAINT);
numGenerationClauses = tableElementList.countGenerationClauses();
// temp tables can't have primary key or check or foreign key or unique constraints defined on them
if ((tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) && (numPrimaryKeys > 0 || numCheckConstraints > 0 || numReferenceConstraints > 0 || numUniqueConstraints > 0))
throw StandardException.newException(SQLState.LANG_NOT_ALLOWED_FOR_DECLARED_GLOBAL_TEMP_TABLE);
// Each of these constraint kinds is backed by an index; a table can't have
// more than 32767 indexes on it and that is why this check.
if ((numPrimaryKeys + numReferenceConstraints + numUniqueConstraints) > Limits.DB2_MAX_INDEXES_ON_TABLE) {
throw StandardException.newException(SQLState.LANG_TOO_MANY_INDEXES_ON_TABLE, String.valueOf(numPrimaryKeys + numReferenceConstraints + numUniqueConstraints), getRelativeName(), String.valueOf(Limits.DB2_MAX_INDEXES_ON_TABLE));
}
if ((numCheckConstraints > 0) || (numGenerationClauses > 0) || (numReferenceConstraints > 0)) {
/* In order to check the validity of the check constraints and
 * generation clauses
 * we must goober up a FromList containing a single table,
 * the table being created, with an RCL containing the
 * new columns and their types. This will allow us to
 * bind the constraint definition trees against that
 * FromList. When doing this, we verify that there are
 * no nodes which can return non-deterministic results.
 */
FromList fromList = makeFromList(null, tableElementList, true);
FormatableBitSet generatedColumns = new FormatableBitSet();
/* Now that we've finally goobered stuff up, bind and validate
 * the check constraints and generation clauses.
 */
if (numGenerationClauses > 0) {
tableElementList.bindAndValidateGenerationClauses(sd, fromList, generatedColumns, null);
}
if (numCheckConstraints > 0) {
tableElementList.bindAndValidateCheckConstraints(fromList);
}
if (numReferenceConstraints > 0) {
tableElementList.validateForeignKeysOnGenerationClauses(fromList, generatedColumns);
}
}
// Primary key columns must be (or become) non-nullable.
if (numPrimaryKeys > 0) {
tableElementList.validatePrimaryKeyNullability();
}
}
Aggregations